diff --git a/.bin/ko b/.bin/ko new file mode 100755 index 0000000000..cea2a4aa69 Binary files /dev/null and b/.bin/ko differ diff --git a/.bin/kustomize b/.bin/kustomize new file mode 100755 index 0000000000..092867b70f Binary files /dev/null and b/.bin/kustomize differ diff --git a/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/00-pipelines.yaml b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/00-pipelines.yaml new file mode 100644 index 0000000000..2b9c67175a --- /dev/null +++ b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/00-pipelines.yaml @@ -0,0 +1,2123 @@ +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Namespace +metadata: + name: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +spec: + privileged: false + allowPrivilegeEscalation: false + volumes: + - 'emptyDir' + - 'configMap' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-controller-cluster-access + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + - apiGroups: [""] + # Controller needs to watch Pods created by TaskRuns to see them progress. 
+ resources: ["pods"] + verbs: ["list", "watch"] + # Controller needs cluster access to all of the CRDs that it is responsible for + # managing. + - apiGroups: ["tekton.dev"] + resources: ["tasks", "clustertasks", "taskruns", "pipelines", "pipelineruns", "pipelineresources", "conditions", "runs"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["tekton.dev"] + resources: ["taskruns/finalizers", "pipelineruns/finalizers", "runs/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["tekton.dev"] + resources: ["tasks/status", "clustertasks/status", "taskruns/status", "pipelines/status", "pipelineruns/status", "pipelineresources/status", "runs/status"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # This is the access that the controller needs on a per-namespace basis. + name: tekton-pipelines-controller-tenant-access + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + # Read-write access to create Pods, K8s Events and PVCs (for Workspaces) + - apiGroups: [""] + resources: ["pods", "pods/log", "events", "persistentvolumeclaims"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + # Read-only access to these. + - apiGroups: [""] + resources: ["configmaps", "limitranges", "secrets", "serviceaccounts"] + verbs: ["get", "list", "watch"] + # Read-write access to StatefulSets for Affinity Assistant. + - apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-webhook-cluster-access + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + # The webhook needs to be able to list and update customresourcedefinitions, + # mainly to update the webhook certificates. + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions", "customresourcedefinitions/status"] + verbs: ["get", "list", "update", "patch", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + # The webhook performs a reconciliation on these two resources and continuously + # updates configuration. + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + # knative starts informers on these things, which is why we need get, list and watch. + verbs: ["list", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + # This mutating webhook is responsible for applying defaults to tekton objects + # as they are received. + resourceNames: ["webhook.pipeline.tekton.dev"] + # When there are changes to the configs or secrets, knative updates the mutatingwebhook config + # with the updated certificates or the refreshed set of rules. + verbs: ["get", "update"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + # validation.webhook.pipeline.tekton.dev performs schema validation when you, for example, create TaskRuns. 
+ # config.webhook.pipeline.tekton.dev validates the logging configuration against knative's logging structure + resourceNames: ["validation.webhook.pipeline.tekton.dev", "config.webhook.pipeline.tekton.dev"] + # When there are changes to the configs or secrets, knative updates the validatingwebhook config + # with the updated certificates or the refreshed set of rules. + verbs: ["get", "update"] + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-pipelines"] + verbs: ["use"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get"] + # The webhook configured the namespace as the OwnerRef on various cluster-scoped resources, + # which requires we can Get the system namespace. + resourceNames: ["tekton-pipelines"] + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-controller + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["list", "watch"] + # The controller needs access to these configmaps for logging information and runtime configuration. + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["config-logging", "config-observability", "config-artifact-bucket", "config-artifact-pvc", "feature-flags", "config-leader-election", "config-registry-cert"] + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-pipelines"] + verbs: ["use"] +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["list", "watch"] + # The webhook needs access to these configmaps for logging information. + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["config-logging", "config-observability", "config-leader-election"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["list", "watch"] + # The webhook daemon makes a reconciliation loop on webhook-certs. Whenever + # the secret changes it updates the webhook configurations with the certificates + # stored in the secret. 
+ - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "update"] + resourceNames: ["webhook-certs"] + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-pipelines"] + verbs: ["use"] +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-leader-election + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + # We uses leases for leaderelection + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tekton-pipelines-info + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + # All system:authenticated users needs to have access + # of the pipelines-info ConfigMap even if they don't + # have access to the other resources present in the + # installed namespace. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["pipelines-info"] + verbs: ["get"] + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-pipelines-controller + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-pipelines-controller-cluster-access + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-controller + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-pipelines-controller-cluster-access + apiGroup: rbac.authorization.k8s.io +--- +# If this ClusterRoleBinding is replaced with a RoleBinding +# then the ClusterRole would be namespaced. 
The access described by +# the tekton-pipelines-controller-tenant-access ClusterRole would +# be scoped to individual tenant namespaces. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-pipelines-controller-tenant-access + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-controller + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-pipelines-controller-tenant-access + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-pipelines-webhook-cluster-access + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-webhook + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-pipelines-webhook-cluster-access + apiGroup: rbac.authorization.k8s.io + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-pipelines-controller + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-controller + namespace: tekton-pipelines +roleRef: + kind: Role + name: tekton-pipelines-controller + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-webhook + namespace: tekton-pipelines +roleRef: + kind: Role + name: tekton-pipelines-webhook + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-pipelines-controller-leaderelection + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-controller + namespace: tekton-pipelines +roleRef: + kind: Role + name: tekton-pipelines-leader-election + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-pipelines-webhook-leaderelection + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-webhook + namespace: tekton-pipelines +roleRef: + kind: Role + 
name: tekton-pipelines-leader-election + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-pipelines-info + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + # Giving all system:authenticated users the access of the + # ConfigMap which contains version information. + - kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-pipelines-info + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clustertasks.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: ClusterTask + plural: clustertasks + categories: + - tekton + - tekton-pipelines + scope: Cluster + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: conditions.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: Condition + plural: conditions + categories: + - tekton + - tekton-pipelines + scope: Namespaced + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: pipelines.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: false + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + - name: v1beta1 + served: true + storage: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + names: + kind: Pipeline + plural: pipelines + categories: + - tekton + - tekton-pipelines + scope: Namespaced + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: pipelineruns.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: PipelineRun + plural: pipelineruns + categories: + - tekton + - tekton-pipelines + shortNames: + - pr + - prs + scope: Namespaced + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: pipelineresources.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: PipelineResource + plural: pipelineresources + categories: + - tekton + - tekton-pipelines + scope: Namespaced + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: runs.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: Run + plural: runs + categories: + - tekton + - tekton-pipelines + scope: Namespaced + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: tasks.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: Task + plural: tasks + categories: + - tekton + - tekton-pipelines + scope: Namespaced + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: taskruns.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" + version: "v20210819-67b318ba62" +spec: + group: tekton.dev + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Succeeded + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Succeeded\")].reason" + - name: StartTime + type: date + jsonPath: .status.startTime + - name: CompletionTime + type: date + jsonPath: .status.completionTime + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + names: + kind: TaskRun + plural: taskruns + categories: + - tekton + - tekton-pipelines + shortNames: + - tr + - trs + scope: Namespaced + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1beta1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: webhook-certs + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" +# The data is populated at install time. 
+--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.webhook.pipeline.tekton.dev + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" +webhooks: + - admissionReviewVersions: ["v1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + failurePolicy: Fail + sideEffects: None + name: validation.webhook.pipeline.tekton.dev +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: webhook.pipeline.tekton.dev + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" +webhooks: + - admissionReviewVersions: ["v1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + failurePolicy: Fail + sideEffects: None + name: webhook.pipeline.tekton.dev +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: config.webhook.pipeline.tekton.dev + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "v20210819-67b318ba62" +webhooks: + - admissionReviewVersions: ["v1"] + clientConfig: + service: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + failurePolicy: Fail + sideEffects: None + name: config.webhook.pipeline.tekton.dev + objectSelector: + matchLabels: + app.kubernetes.io/part-of: tekton-pipelines + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tekton-aggregate-edit + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - tekton.dev + resources: + - tasks + - taskruns + - pipelines + - pipelineruns + - pipelineresources + - conditions + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tekton-aggregate-view + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: + - apiGroups: + - tekton.dev + resources: + - tasks + - taskruns + - pipelines + - pipelineruns + - pipelineresources + - conditions + verbs: + - get + - list + - watch + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-artifact-bucket + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +# data: +# # location of the gcs bucket to be used for artifact storage +# location: "gs://bucket-name" +# # name of the secret that will contain the credentials for the service account +# # with access to the bucket +# bucket.service.account.secret.name: +# # The key in the secret with the required service account json +# bucket.service.account.secret.key: +# # The field name that should be used for the service account +# # Valid values: GOOGLE_APPLICATION_CREDENTIALS, BOTO_CONFIG. +# bucket.service.account.field.name: GOOGLE_APPLICATION_CREDENTIALS + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-artifact-pvc + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +# data: +# # size of the PVC volume +# size: 5Gi +# +# # storage class of the PVC volume +# storageClassName: storage-class-name + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-defaults + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # default-timeout-minutes contains the default number of + # minutes to use for TaskRun and PipelineRun, if none is specified. + default-timeout-minutes: "60" # 60 minutes + + # default-service-account contains the default service account name + # to use for TaskRun and PipelineRun, if none is specified. + default-service-account: "default" + + # default-managed-by-label-value contains the default value given to the + # "app.kubernetes.io/managed-by" label applied to all Pods created for + # TaskRuns. If a user's requested TaskRun specifies another value for this + # label, the user's request supercedes. + default-managed-by-label-value: "tekton-pipelines" + + # default-pod-template contains the default pod template to use + # TaskRun and PipelineRun, if none is specified. If a pod template + # is specified, the default pod template is ignored. + # default-pod-template: + + # default-cloud-events-sink contains the default CloudEvents sink to be + # used for TaskRun and PipelineRun, when no sink is specified. + # Note that right now it is still not possible to set a PipelineRun or + # TaskRun specific sink, so the default is the only option available. + # If no sink is specified, no CloudEvent is generated + # default-cloud-events-sink: + + # default-task-run-workspace-binding contains the default workspace + # configuration provided for any Workspaces that a Task declares + # but that a TaskRun does not explicitly provide. + # default-task-run-workspace-binding: | + # emptyDir: {} + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: feature-flags + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # Setting this flag to "true" will prevent Tekton to create an + # Affinity Assistant for every TaskRun sharing a PVC workspace + # + # The default behaviour is for Tekton to create Affinity Assistants + # + # See more in the workspace documentation about Affinity Assistant + # https://github.com/tektoncd/pipeline/blob/main/docs/workspaces.md#affinity-assistant-and-specifying-workspace-order-in-a-pipeline + # or https://github.com/tektoncd/pipeline/pull/2630 for more info. 
+ disable-affinity-assistant: "false" + # Setting this flag to "false" will allow Tekton to override your + # Task container's $HOME environment variable. + # + # See https://github.com/tektoncd/pipeline/issues/2013 for more + # info. + disable-home-env-overwrite: "true" + # Setting this flag to "false" will allow Tekton to override your + # Task container's working directory. + # + # See https://github.com/tektoncd/pipeline/issues/1836 for more + # info. + disable-working-directory-overwrite: "true" + # Setting this flag to "true" will prevent Tekton scanning attached + # service accounts and injecting any credentials it finds into your + # Steps. + # + # The default behaviour currently is for Tekton to search service + # accounts for secrets matching a specified format and automatically + # mount those into your Steps. + # + # Note: setting this to "true" will prevent PipelineResources from + # working. + # + # See https://github.com/tektoncd/pipeline/issues/2791 for more + # info. + disable-creds-init: "false" + # This option should be set to false when Pipelines is running in a + # cluster that does not use injected sidecars such as Istio. Setting + # it to false should decrease the time it takes for a TaskRun to start + # running. For clusters that use injected sidecars, setting this + # option to false can lead to unexpected behavior. + # + # See https://github.com/tektoncd/pipeline/issues/2080 for more info. + running-in-environment-with-injected-sidecars: "true" + # Setting this flag to "true" will require that any Git SSH Secret + # offered to Tekton must have known_hosts included. + # + # See https://github.com/tektoncd/pipeline/issues/2981 for more + # info. + require-git-ssh-secret-known-hosts: "false" + # Setting this flag to "true" enables the use of Tekton OCI bundle. + # This is an experimental feature and thus should still be considered + # an alpha feature. + enable-tekton-oci-bundles: "false" + # Setting this flag to "true" enables the use of custom tasks from + # within pipelines. + # This is an experimental feature and thus should still be considered + # an alpha feature. + enable-custom-tasks: "false" + # Setting this flag will determine which gated features are enabled. + # Acceptable values are "stable" or "alpha". + enable-api-fields: "stable" + # Setting this flag to "true" scopes when expressions to guard a Task only + # instead of a Task and its dependent Tasks. + scope-when-expressions-to-task: "false" + +--- +# Copyright 2021 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-info + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # Contains pipelines version which can be queried by external + # tools such as CLI. Elevated permissions are already given to + # this ConfigMap such that even if we don't have access to + # other resources in the namespace we still can have access to + # this ConfigMap. 
+ version: "v20210819-67b318ba62" + +--- +# Copyright 2020 Tekton Authors LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-leader-election + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # An inactive but valid configuration follows; see example. + leaseDuration: "15s" + renewDeadline: "10s" + retryPeriod: "2s" + +--- +# Copyright 2019 Tekton Authors LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # Common configuration for all knative codebase + zap-logger-config: | + { + "level": "info", + "development": false, + "sampling": { + "initial": 100, + "thereafter": 100 + }, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + # Log level overrides + loglevel.controller: "info" + loglevel.webhook: "info" + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # metrics.backend-destination field specifies the system metrics destination. + # It supports either prometheus (the default) or stackdriver. + # Note: Using Stackdriver will incur additional charges. + metrics.backend-destination: prometheus + + # metrics.stackdriver-project-id field specifies the Stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used and metrics will be sent to the cluster's project if this field is + # not provided. + metrics.stackdriver-project-id: "" + + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed + # to send metrics to Stackdriver using "global" resource type and custom + # metric type. Setting this flag to "true" could cause extra Stackdriver + # charge. If metrics.backend-destination is not Stackdriver, this is + # ignored. + metrics.allow-stackdriver-custom-metrics: "false" + +--- +# Copyright 2020 Tekton Authors LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-registry-cert + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +# data: +# # Registry's self-signed certificate +# cert: | + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tekton-pipelines-controller + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210819-67b318ba62" + app.kubernetes.io/part-of: tekton-pipelines + # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml + pipeline.tekton.dev/release: "v20210819-67b318ba62" + # labels below are related to istio and should not be used for resource lookup + version: "v20210819-67b318ba62" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + template: + metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210819-67b318ba62" + app.kubernetes.io/part-of: tekton-pipelines + # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml + pipeline.tekton.dev/release: "v20210819-67b318ba62" + # labels below are related to istio and should not be used for resource lookup + app: tekton-pipelines-controller + version: "v20210819-67b318ba62" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: NotIn + values: + - windows + serviceAccountName: tekton-pipelines-controller + containers: + - name: tekton-pipelines-controller + image: gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/controller@sha256:978e69c3dfa6d9f32bafa78ff62b0db43db2ad57704e9ed9c7c1d8b661c9ae0c + args: [ + # These images are built on-demand by `ko resolve` and are replaced + # by image references by digest. + "-kubeconfig-writer-image", "gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/kubeconfigwriter@sha256:ea9264f4e18b09b07e1e70c4d6ad211c330f71a1ec89c6a4bcc5b2b75720b78d", "-git-image", "gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/git-init@sha256:433d1c7c850276d9277da4ce1d897aa07260d3bd35b4a9484b8a7e5c5b3b8057", "-entrypoint-image", "gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/entrypoint@sha256:4945f127e504dc22d65f5b3c04ef27306c9dff0d08539450f31bd5b5be96fb2f", "-nop-image", "gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/nop@sha256:340acc618ba7ccebde3b1117706e47e72026b437ab9e8a6c469ffe5cdfc070fe", "-imagedigest-exporter-image", "gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/imagedigestexporter@sha256:2059abdc6140d6b43a7469756eeecc37e6d06151f0268f876e511bcbfaac837c", "-pr-image", "gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/pullrequest-init@sha256:f97d9285ce0fa1cfd2a7a1feeda0fee48f05569a3504f5ffccc13ab8910d44f4", + # This is gcr.io/google.com/cloudsdktool/cloud-sdk:302.0.0-slim + "-gsutil-image", "gcr.io/google.com/cloudsdktool/cloud-sdk@sha256:27b2c22bf259d9bc1a291e99c63791ba0c27a04d2db0a43241ba0f1f20f4067f", + # The shell image must be root in order to create directories and copy files to PVCs. 
+ # gcr.io/distroless/base:debug as of April 17, 2021
+ # the image reference must not contain a tag, so that it works on runtimes such as cri-o
+ "-shell-image", "gcr.io/distroless/base@sha256:aa4fd987555ea10e1a4ec8765da8158b5ffdfef1e72da512c7ede509bc9966c4"]
+ volumeMounts:
+ - name: config-logging
+ mountPath: /etc/config-logging
+ - name: config-registry-cert
+ mountPath: /etc/config-registry-cert
+ env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # These phony AWS credentials are here to work around a bug in the aws go sdk
+ # that causes extremely long delays in the execution of tasks after the initial
+ # deployment of the Tekton Pipelines controller. See issue https://github.com/tektoncd/pipeline/issues/4087
+ # for more information.
+ - name: AWS_ACCESS_KEY_ID
+ value: foobarbaz
+ - name: AWS_SECRET_ACCESS_KEY
+ value: foobarbaz
+ - name: AWS_DEFAULT_REGION
+ value: foobarbaz
+ # If you are changing these names, you will also need to update
+ # the controller's Role in 200-role.yaml to include the new
+ # values in the "configmaps" "get" rule.
+ - name: CONFIG_DEFAULTS_NAME
+ value: config-defaults
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ - name: CONFIG_OBSERVABILITY_NAME
+ value: config-observability
+ - name: CONFIG_ARTIFACT_BUCKET_NAME
+ value: config-artifact-bucket
+ - name: CONFIG_ARTIFACT_PVC_NAME
+ value: config-artifact-pvc
+ - name: CONFIG_FEATURE_FLAGS_NAME
+ value: feature-flags
+ - name: CONFIG_LEADERELECTION_NAME
+ value: config-leader-election
+ - name: SSL_CERT_FILE
+ value: /etc/config-registry-cert/cert
+ - name: SSL_CERT_DIR
+ value: /etc/ssl/certs
+ - name: METRICS_DOMAIN
+ value: tekton.dev/pipeline
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - all
+ # User 65532 is the distroless nonroot user ID
+ runAsUser: 65532
+ runAsGroup: 65532
+ ports:
+ - name: probes
+ containerPort: 8080
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: probes
+ scheme: HTTP
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: probes
+ scheme: HTTP
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ volumes:
+ - name: config-logging
+ configMap:
+ name: config-logging
+ - name: config-registry-cert
+ configMap:
+ name: config-registry-cert
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: controller
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/version: "v20210819-67b318ba62"
+ app.kubernetes.io/part-of: tekton-pipelines
+ # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml
+ pipeline.tekton.dev/release: "v20210819-67b318ba62"
+ # labels below are related to istio and should not be used for resource lookup
+ app: tekton-pipelines-controller
+ version: "v20210819-67b318ba62"
+ name: tekton-pipelines-controller
+ namespace: tekton-pipelines
+spec:
+ ports:
+ - name: http-metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ - name: probes
+ port: 8080
+ selector:
+ app.kubernetes.io/name: controller
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: default
+ app.kubernetes.io/part-of: tekton-pipelines
+
+---
+# Copyright 2020 The Tekton Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: tekton-pipelines-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210819-67b318ba62" + app.kubernetes.io/part-of: tekton-pipelines + # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml + pipeline.tekton.dev/release: "v20210819-67b318ba62" + # labels below are related to istio and should not be used for resource lookup + version: "v20210819-67b318ba62" +spec: + minReplicas: 1 + maxReplicas: 5 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: tekton-pipelines-webhook + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 100 + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + # Note: the Deployment name must be the same as the Service name specified in + # config/400-webhook-service.yaml. If you change this name, you must also + # change the value of WEBHOOK_SERVICE_NAME below. 
+ name: tekton-pipelines-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210819-67b318ba62" + app.kubernetes.io/part-of: tekton-pipelines + # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml + pipeline.tekton.dev/release: "v20210819-67b318ba62" + # labels below are related to istio and should not be used for resource lookup + version: "v20210819-67b318ba62" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + template: + metadata: + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210819-67b318ba62" + app.kubernetes.io/part-of: tekton-pipelines + # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml + pipeline.tekton.dev/release: "v20210819-67b318ba62" + # labels below are related to istio and should not be used for resource lookup + app: tekton-pipelines-webhook + version: "v20210819-67b318ba62" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: NotIn + values: + - windows + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + topologyKey: kubernetes.io/hostname + weight: 100 + serviceAccountName: tekton-pipelines-webhook + containers: + - name: webhook + # This is the Go import path for the binary that is containerized + # and substituted here. + image: gcr.io/tekton-nightly/github.com/tektoncd/pipeline/cmd/webhook@sha256:de42c9f55f7554414d67c350bb04d8cf234f56bafaaa944652e1471bab3091f5 + # Resource request required for autoscaler to take any action for a metric + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 500m + memory: 500Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # If you are changing these names, you will also need to update + # the webhook's Role in 200-role.yaml to include the new + # values in the "configmaps" "get" rule. 
+ - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: tekton-pipelines-webhook + - name: WEBHOOK_SECRET_NAME + value: webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + # User 65532 is the distroless nonroot user ID + runAsUser: 65532 + runAsGroup: 65532 + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: https-webhook + containerPort: 8443 + - name: probes + containerPort: 8080 + livenessProbe: + httpGet: + path: /health + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /readiness + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210819-67b318ba62" + app.kubernetes.io/part-of: tekton-pipelines + # tekton.dev/release value replaced with inputs.params.versionTag in pipeline/tekton/publish.yaml + pipeline.tekton.dev/release: "v20210819-67b318ba62" + # labels below are related to istio and should not be used for resource lookup + app: tekton-pipelines-webhook + version: "v20210819-67b318ba62" + name: tekton-pipelines-webhook + namespace: tekton-pipelines +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + - name: probes + port: 8080 + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + +--- diff --git a/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/01-clusterrole.yaml b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/01-clusterrole.yaml new file mode 100644 index 0000000000..df751a62fc --- /dev/null +++ b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/01-clusterrole.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pipelines-scc-role +rules: +- apiGroups: [security.openshift.io] + resourceNames: [nonroot] + resources: [securitycontextconstraints] + verbs: [use] diff --git a/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/02-rolebinding.yaml b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/02-rolebinding.yaml new file mode 100644 index 0000000000..116e79a6a6 --- /dev/null +++ b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/02-rolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openshift-pipelines-permission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pipelines-scc-role +subjects: +- kind: ServiceAccount + name: tekton-pipelines-controller + namespace: openshift-pipelines diff --git a/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/03-monitoring.yaml b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/03-monitoring.yaml new file mode 100644 index 0000000000..18b6a5ca42 --- 
/dev/null +++ b/cmd/openshift/operator/kodata/tekton-pipeline/0.0.0-nightly/03-monitoring.yaml @@ -0,0 +1,68 @@ +--- +# Copyright 2018 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: openshift-pipelines-read + namespace: tekton-pipelines +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: openshift-pipelines-prometheus-k8s-read-binding + namespace: tekton-pipelines + annotations: + operator.tekton.dev/preserve-rb-subject-namespace: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: openshift-pipelines-read +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app: controller + annotations: + networkoperator.openshift.io/ignore-errors: "" + name: openshift-pipelines-monitor + namespace: tekton-pipelines +spec: + endpoints: + - interval: 10s + port: http-metrics + jobLabel: app + namespaceSelector: + matchNames: + - openshift-pipelines + selector: + matchLabels: + app: tekton-pipelines-controller diff --git a/cmd/openshift/operator/kodata/tekton-pipeline/00-prereconcile/openshift-pipelines-scc.yaml b/cmd/openshift/operator/kodata/tekton-pipeline/00-prereconcile/openshift-pipelines-scc.yaml new file mode 100644 index 0000000000..fe60ded292 --- /dev/null +++ b/cmd/openshift/operator/kodata/tekton-pipeline/00-prereconcile/openshift-pipelines-scc.yaml @@ -0,0 +1,40 @@ +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + kubernetes.io/description: pipelines-scc is a close replica of anyuid scc. pipelines-scc has fsGroup - MustRunAs. 
+ release.openshift.io/create-only: "true" + name: pipelines-scc +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: null +defaultAddCapabilities: null +fsGroup: + type: MustRunAs +groups: +- system:cluster-admins +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret diff --git a/cmd/openshift/operator/kodata/tekton-trigger/0.0.0-nightly/00-triggers.yaml b/cmd/openshift/operator/kodata/tekton-trigger/0.0.0-nightly/00-triggers.yaml new file mode 100644 index 0000000000..942bee33fc --- /dev/null +++ b/cmd/openshift/operator/kodata/tekton-trigger/0.0.0-nightly/00-triggers.yaml @@ -0,0 +1,1497 @@ +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: tekton-triggers + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +spec: + privileged: false + allowPrivilegeEscalation: false + volumes: + - 'emptyDir' + - 'configMap' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
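+
+# The pipelines-scc SecurityContextConstraints added earlier in this patch is
+# consumed through ordinary RBAC "use" rules, just like the tekton-triggers
+# PodSecurityPolicy above (see the eventlistener roles that follow). A hypothetical
+# grant for a ServiceAccount named "pipeline" in a user namespace might look like:
+#
+#   apiVersion: rbac.authorization.k8s.io/v1
+#   kind: ClusterRole
+#   metadata:
+#     name: use-pipelines-scc              # hypothetical name
+#   rules:
+#   - apiGroups: [security.openshift.io]
+#     resources: [securitycontextconstraints]
+#     resourceNames: [pipelines-scc]
+#     verbs: [use]
+#   ---
+#   apiVersion: rbac.authorization.k8s.io/v1
+#   kind: RoleBinding
+#   metadata:
+#     name: use-pipelines-scc              # hypothetical name
+#     namespace: user-project              # hypothetical namespace
+#   roleRef:
+#     apiGroup: rbac.authorization.k8s.io
+#     kind: ClusterRole
+#     name: use-pipelines-scc
+#   subjects:
+#   - kind: ServiceAccount
+#     name: pipeline                       # hypothetical ServiceAccount
+#     namespace: user-project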
+ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-admin + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: [""] + resources: ["configmaps", "services", "events"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["apps"] + resources: ["deployments", "deployments/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["triggers.tekton.dev"] + resources: ["clustertriggerbindings", "clusterinterceptors", "eventlisteners", "triggerbindings", "triggertemplates", "triggers", "eventlisteners/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["triggers.tekton.dev"] + resources: ["clustertriggerbindings/status", "clusterinterceptors/status", "eventlisteners/status", "triggerbindings/status", "triggertemplates/status", "triggers/status"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + # We uses leases for leaderelection + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["serving.knative.dev"] + resources: ["*", "*/status", "*/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "deletecollection", "patch", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-core-interceptors + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tekton-triggers-eventlistener-roles + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: ["triggers.tekton.dev"] + resources: ["eventlisteners", "triggerbindings", "triggertemplates", "triggers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: ["tekton.dev"] + resources: ["pipelineruns", "pipelineresources", "taskruns"] + verbs: ["create"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["impersonate"] + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-triggers"] + verbs: ["use"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-eventlistener-clusterroles + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: ["triggers.tekton.dev"] + resources: ["clustertriggerbindings", "clusterinterceptors"] + verbs: ["get", "list", "watch"] + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: when multi-tenant EventListener progresses, moving this Role +# to a ClusterRole is not the advisable path. Additional Roles that +# adds access to Secrets to the Namespaces managed by the multi-tenant +# EventListener is what should be done. While not as simple, it avoids +# giving access to K8S system level, cluster admin privileged level Secrets + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-admin + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-triggers"] + verbs: ["use"] +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-admin-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-triggers"] + verbs: ["use"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-triggers"] + verbs: ["use"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tekton-triggers-info + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +rules: + # All system:authenticated users needs to have access + # of the triggers-info ConfigMap even if they don't + # have access to the other resources present in the + # installed namespace. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["triggers-info"] + verbs: ["get"] + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
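+
+# The tekton-triggers-eventlistener-roles and tekton-triggers-eventlistener-clusterroles
+# ClusterRoles defined earlier in this file are intended to be bound to whatever
+# ServiceAccount an EventListener runs as. A sketch with illustrative names
+# ("tekton-triggers-example-sa" in namespace "getting-started"):
+#
+#   apiVersion: rbac.authorization.k8s.io/v1
+#   kind: RoleBinding
+#   metadata:
+#     name: example-eventlistener-binding
+#     namespace: getting-started
+#   subjects:
+#   - kind: ServiceAccount
+#     name: tekton-triggers-example-sa
+#     namespace: getting-started
+#   roleRef:
+#     apiGroup: rbac.authorization.k8s.io
+#     kind: ClusterRole
+#     name: tekton-triggers-eventlistener-roles
+#   ---
+#   apiVersion: rbac.authorization.k8s.io/v1
+#   kind: ClusterRoleBinding
+#   metadata:
+#     name: example-eventlistener-clusterbinding
+#   subjects:
+#   - kind: ServiceAccount
+#     name: tekton-triggers-example-sa
+#     namespace: getting-started
+#   roleRef:
+#     apiGroup: rbac.authorization.k8s.io
+#     kind: ClusterRole
+#     name: tekton-triggers-eventlistener-clusterroles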
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-triggers-controller + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-triggers-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-triggers-controller-admin + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + - kind: ServiceAccount + name: tekton-triggers-controller + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-triggers-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-triggers-webhook-admin + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + - kind: ServiceAccount + name: tekton-triggers-webhook + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-triggers-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-triggers-core-interceptors + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + - kind: ServiceAccount + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-triggers-core-interceptors + apiGroup: rbac.authorization.k8s.io + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-triggers-controller-admin + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + - kind: ServiceAccount + name: tekton-triggers-controller + namespace: tekton-pipelines +roleRef: + kind: Role + name: tekton-triggers-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-triggers-webhook-admin + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + - kind: ServiceAccount + name: tekton-triggers-webhook + namespace: tekton-pipelines +roleRef: + kind: Role + name: tekton-triggers-admin-webhook + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + - kind: ServiceAccount + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines +roleRef: + kind: Role + name: tekton-triggers-core-interceptors + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-triggers-info + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +subjects: + # Giving all system:authenticated users the access of the + # ConfigMap which contains version information. + - kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-triggers-info + +--- +# Copyright 2021 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinterceptors.triggers.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + version: "v20210820-92eb94311f" +spec: + group: triggers.tekton.dev + scope: Cluster + names: + kind: ClusterInterceptor + plural: clusterinterceptors + singular: clusterinterceptor + shortNames: + - ci + categories: + - tekton + - tekton-triggers + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clustertriggerbindings.triggers.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + version: "v20210820-92eb94311f" +spec: + group: triggers.tekton.dev + scope: Cluster + names: + kind: ClusterTriggerBinding + plural: clustertriggerbindings + singular: clustertriggerbinding + shortNames: + - ctb + categories: + - tekton + - tekton-triggers + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
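+
+# Because the schema above is intentionally open (x-kubernetes-preserve-unknown-fields),
+# the CRD itself does not show what an instance looks like. A typical
+# ClusterTriggerBinding extracting fields from a GitHub push payload might be
+# (names and JSONPath expressions are illustrative):
+#
+#   apiVersion: triggers.tekton.dev/v1beta1
+#   kind: ClusterTriggerBinding
+#   metadata:
+#     name: example-push-binding
+#   spec:
+#     params:
+#     - name: git-repo-url
+#       value: $(body.repository.url)
+#     - name: git-revision
+#       value: $(body.head_commit.id)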
+ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: eventlisteners.triggers.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + version: "v20210820-92eb94311f" +spec: + group: triggers.tekton.dev + scope: Namespaced + names: + kind: EventListener + plural: eventlisteners + singular: eventlistener + shortNames: + - el + categories: + - tekton + - tekton-triggers + versions: + - name: v1beta1 + served: true + storage: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Address + type: string + jsonPath: .status.address.url + - name: Available + type: string + jsonPath: ".status.conditions[?(@.type=='Available')].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type=='Available')].reason" + - name: Ready + type: string + jsonPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type=='Ready')].reason" + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + additionalPrinterColumns: + - name: Address + type: string + jsonPath: .status.address.url + - name: Available + type: string + jsonPath: ".status.conditions[?(@.type=='Available')].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type=='Available')].reason" + - name: Ready + type: string + jsonPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type=='Ready')].reason" + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
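+
+# The additionalPrinterColumns above surface .status.address.url and the
+# Available/Ready conditions in kubectl get output for eventlisteners. A minimal
+# EventListener instance might look like this (all names are illustrative; the
+# referenced binding, template and ServiceAccount would have to exist):
+#
+#   apiVersion: triggers.tekton.dev/v1beta1
+#   kind: EventListener
+#   metadata:
+#     name: example-listener
+#     namespace: getting-started
+#   spec:
+#     serviceAccountName: tekton-triggers-example-sa
+#     triggers:
+#     - name: example-trigger
+#       bindings:
+#       - ref: example-push-binding
+#         kind: ClusterTriggerBinding
+#       template:
+#         ref: example-trigger-template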
+ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: triggers.triggers.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + version: "v20210820-92eb94311f" +spec: + group: triggers.tekton.dev + scope: Namespaced + names: + kind: Trigger + plural: triggers + singular: trigger + shortNames: + - tri + categories: + - tekton + - tekton-triggers + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: triggerbindings.triggers.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + version: "v20210820-92eb94311f" +spec: + group: triggers.tekton.dev + scope: Namespaced + names: + kind: TriggerBinding + plural: triggerbindings + singular: triggerbinding + shortNames: + - tb + categories: + - tekton + - tekton-triggers + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. 
+ # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: triggertemplates.triggers.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + version: "v20210820-92eb94311f" +spec: + group: triggers.tekton.dev + scope: Namespaced + names: + kind: TriggerTemplate + plural: triggertemplates + singular: triggertemplate + shortNames: + - tt + categories: + - tekton + - tekton-triggers + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + - name: v1alpha1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + # Opt into the status subresource so metadata.generation + # starts to increment + subresources: + status: {} + +--- +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: triggers-webhook-certs + namespace: tekton-pipelines + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" +# The data is populated at install time. +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.webhook.triggers.tekton.dev + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" +webhooks: + - admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: tekton-triggers-webhook + namespace: tekton-pipelines + failurePolicy: Fail + sideEffects: None + name: validation.webhook.triggers.tekton.dev +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: webhook.triggers.tekton.dev + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" +webhooks: + - admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: tekton-triggers-webhook + namespace: tekton-pipelines + failurePolicy: Fail + sideEffects: None + name: webhook.triggers.tekton.dev +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: config.webhook.triggers.tekton.dev + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" +webhooks: + - admissionReviewVersions: + - v1beta1 + - v1 + clientConfig: + service: + name: tekton-triggers-webhook + namespace: tekton-pipelines + failurePolicy: Fail + sideEffects: None + name: config.webhook.triggers.tekton.dev + namespaceSelector: + matchExpressions: + - key: triggers.tekton.dev/release + operator: Exists + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
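+
+# The TriggerBinding, TriggerTemplate and Trigger kinds registered earlier in this
+# file are normally used together. A compact, illustrative set (the TaskRun and the
+# task it references are placeholders) might look like:
+#
+#   apiVersion: triggers.tekton.dev/v1beta1
+#   kind: TriggerBinding
+#   metadata:
+#     name: example-binding
+#   spec:
+#     params:
+#     - name: git-revision
+#       value: $(body.head_commit.id)
+#   ---
+#   apiVersion: triggers.tekton.dev/v1beta1
+#   kind: TriggerTemplate
+#   metadata:
+#     name: example-trigger-template
+#   spec:
+#     params:
+#     - name: git-revision
+#       description: The git revision to build
+#     resourcetemplates:
+#     - apiVersion: tekton.dev/v1beta1
+#       kind: TaskRun
+#       metadata:
+#         generateName: example-run-
+#       spec:
+#         taskRef:
+#           name: example-task
+#         params:
+#         - name: revision
+#           value: $(tt.params.git-revision)
+#   ---
+#   apiVersion: triggers.tekton.dev/v1beta1
+#   kind: Trigger
+#   metadata:
+#     name: example-trigger
+#   spec:
+#     bindings:
+#     - ref: example-binding
+#     template:
+#       ref: example-trigger-template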
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tekton-triggers-aggregate-edit + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - triggers.tekton.dev + resources: + - clustertriggerbindings + - clusterinterceptors + - eventlisteners + - triggers + - triggerbindings + - triggertemplates + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tekton-triggers-aggregate-view + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: + - apiGroups: + - triggers.tekton.dev + resources: + - clustertriggerbindings + - clusterinterceptors + - eventlisteners + - triggers + - triggerbindings + - triggertemplates + verbs: + - get + - list + - watch + +--- +# Copyright 2021 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-defaults-triggers + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # default-service-account contains the default service account name + # to use for TaskRun and PipelineRun, if none is specified. + default-service-account: "default" + +--- +# Copyright 2021 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: feature-flags-triggers + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # Setting this flag will determine which gated features are enabled. + # Acceptable values are "stable" or "alpha". + enable-api-fields: "stable" + +--- +# Copyright 2021 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: triggers-info + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +data: + # Contains triggers version which can be queried by external + # tools such as CLI. Elevated permissions are already given to + # this ConfigMap such that even if we don't have access to + # other resources in the namespace we still can have access to + # this ConfigMap. + version: "v20210820-92eb94311f" + +--- +# Copyright 2019 Tekton Authors LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging-triggers + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +data: + # Common configuration for all knative codebase + zap-logger-config: | + { + "level": "info", + "development": false, + "sampling": { + "initial": 100, + "thereafter": 100 + }, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + # Log level overrides + loglevel.controller: "info" + loglevel.webhook: "info" + loglevel.eventlistener: "info" + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability-triggers + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # metrics.backend-destination field specifies the system metrics destination. + # It supports either prometheus (the default) or stackdriver. + # Note: Using stackdriver will incur additional charges + metrics.backend-destination: prometheus + + # metrics.stackdriver-project-id field specifies the stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used if this field is not provided. + metrics.stackdriver-project-id: "" + + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to + # Stackdriver using "global" resource type and custom metric type if the + # metrics are not supported by "knative_revision" resource type. Setting this + # flag to "true" could cause extra Stackdriver charge. + # If metrics.backend-destination is not Stackdriver, this is ignored. + metrics.allow-stackdriver-custom-metrics: "false" + +--- +# Copyright 2019 Tekton Authors LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
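+
+# Opting the triggers components into alpha gated features is a matter of flipping
+# the flag in the feature-flags-triggers ConfigMap above (illustrative only;
+# "stable" is the shipped default):
+#
+#   data:
+#     enable-api-fields: "alpha"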
+ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + app: tekton-triggers-controller + version: "v20210820-92eb94311f" + name: tekton-triggers-controller + namespace: tekton-pipelines +spec: + ports: + - name: http-metrics + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tekton-triggers-controller + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + # tekton.dev/release value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml + triggers.tekton.dev/release: "v20210820-92eb94311f" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + template: + metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + app: tekton-triggers-controller + triggers.tekton.dev/release: "v20210820-92eb94311f" + # version value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml + version: "v20210820-92eb94311f" + spec: + serviceAccountName: tekton-triggers-controller + containers: + - name: tekton-triggers-controller + image: "gcr.io/tekton-nightly/github.com/tektoncd/triggers/cmd/controller:v20210820-92eb94311f@sha256:076031889150066dcb82eba7d8e0721f85c9f974ee415e1d9220478fcd279ed5" + args: ["-logtostderr", "-stderrthreshold", "INFO", "-el-image", "gcr.io/tekton-nightly/github.com/tektoncd/triggers/cmd/eventlistenersink:v20210820-92eb94311f@sha256:d0e931a790586ba38241f11ae790c6b8ecbf40db2990652b8346ef1ef24d9a6b", "-el-port", "8080", "-el-security-context=true", "-el-readtimeout", "5", "-el-writetimeout", "40", "-el-idletimeout", "120", "-el-timeouthandler", "30", "-period-seconds", "10", "-failure-threshold", "1"] + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging-triggers + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability-triggers + - name: CONFIG_DEFAULTS_NAME + value: config-defaults-triggers + - name: METRICS_DOMAIN + value: tekton.dev/triggers + - name: 
METRICS_PROMETHEUS_PORT + value: "9000" + securityContext: + allowPrivilegeEscalation: false + # User 65532 is the distroless nonroot user ID + runAsUser: 65532 + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: tekton-triggers-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + app: tekton-triggers-webhook + version: "v20210820-92eb94311f" + triggers.tekton.dev/release: "v20210820-92eb94311f" +spec: + ports: + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + +--- +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tekton-triggers-webhook + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + # tekton.dev/release value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml + triggers.tekton.dev/release: "v20210820-92eb94311f" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + template: + metadata: + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + app: tekton-triggers-webhook + triggers.tekton.dev/release: "v20210820-92eb94311f" + # version value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml + version: "v20210820-92eb94311f" + spec: + serviceAccountName: tekton-triggers-webhook + containers: + - name: webhook + # This is the Go import path for the binary that is containerized + # and substituted here. 
+ image: "gcr.io/tekton-nightly/github.com/tektoncd/triggers/cmd/webhook:v20210820-92eb94311f@sha256:52bbb8dffd7a85b1fc0c5c9e956101e5d88100e2e3024a0a97416e1e64b08ee9" + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging-triggers + - name: WEBHOOK_SERVICE_NAME + value: tekton-triggers-webhook + - name: WEBHOOK_SECRET_NAME + value: triggers-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/triggers + ports: + - name: metrics + containerPort: 9000 + - name: profiling + containerPort: 8008 + - name: https-webhook + containerPort: 8443 + securityContext: + allowPrivilegeEscalation: false + # User 65532 is the distroless nonroot user ID + runAsUser: 65532 + +--- diff --git a/cmd/openshift/operator/kodata/tekton-trigger/0.0.0-nightly/01-interceptors.yaml b/cmd/openshift/operator/kodata/tekton-trigger/0.0.0-nightly/01-interceptors.yaml new file mode 100644 index 0000000000..47d77060e9 --- /dev/null +++ b/cmd/openshift/operator/kodata/tekton-trigger/0.0.0-nightly/01-interceptors.yaml @@ -0,0 +1,166 @@ +# Copyright 2020 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + labels: + app.kubernetes.io/name: core-interceptors + app.kubernetes.io/component: interceptors + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + # tekton.dev/release value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml + triggers.tekton.dev/release: "v20210820-92eb94311f" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: core-interceptors + app.kubernetes.io/component: interceptors + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + template: + metadata: + labels: + app.kubernetes.io/name: core-interceptors + app.kubernetes.io/component: interceptors + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + app: tekton-triggers-core-interceptors + triggers.tekton.dev/release: "v20210820-92eb94311f" + # version value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml + version: "v20210820-92eb94311f" + spec: + serviceAccountName: tekton-triggers-core-interceptors + containers: + - name: tekton-triggers-core-interceptors + image: "gcr.io/tekton-nightly/github.com/tektoncd/triggers/cmd/interceptors:v20210820-92eb94311f@sha256:b730e88d378846aeefd5d3221e2090f85a64ac2123559f7531715a86389703d8" + args: ["-logtostderr", "-stderrthreshold", "INFO"] + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging-triggers + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability-triggers + - name: METRICS_DOMAIN + value: tekton.dev/triggers + readinessProbe: + 
httpGet: + path: /ready + port: 8082 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + # User 65532 is the distroless nonroot user ID + runAsUser: 65532 + runAsGroup: 65532 + capabilities: + drop: + - all +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: tekton-triggers-core-interceptors + app.kubernetes.io/component: interceptors + app.kubernetes.io/instance: default + app.kubernetes.io/version: "v20210820-92eb94311f" + app.kubernetes.io/part-of: tekton-triggers + triggers.tekton.dev/release: "v20210820-92eb94311f" + app: tekton-triggers-core-interceptors + version: "v20210820-92eb94311f" + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines +spec: + ports: + - name: "http" + port: 80 + targetPort: 8082 + selector: + app.kubernetes.io/name: core-interceptors + app.kubernetes.io/component: interceptors + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-triggers + +--- +# Copyright 2021 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: triggers.tekton.dev/v1alpha1 +kind: ClusterInterceptor +metadata: + name: cel +spec: + clientConfig: + service: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + path: "cel" +--- +apiVersion: triggers.tekton.dev/v1alpha1 +kind: ClusterInterceptor +metadata: + name: bitbucket +spec: + clientConfig: + service: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + path: "bitbucket" +--- +apiVersion: triggers.tekton.dev/v1alpha1 +kind: ClusterInterceptor +metadata: + name: github +spec: + clientConfig: + service: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + path: "github" +--- +apiVersion: triggers.tekton.dev/v1alpha1 +kind: ClusterInterceptor +metadata: + name: gitlab +spec: + clientConfig: + service: + name: tekton-triggers-core-interceptors + namespace: tekton-pipelines + path: "gitlab" + +--- diff --git a/cmd/openshift/proxy-webhook/main.go b/cmd/openshift/proxy-webhook/main.go index 9838ea569e..24b509a3ad 100644 --- a/cmd/openshift/proxy-webhook/main.go +++ b/cmd/openshift/proxy-webhook/main.go @@ -23,6 +23,7 @@ import ( "github.com/tektoncd/operator/pkg/reconciler/proxy" "knative.dev/pkg/configmap" "knative.dev/pkg/controller" + "knative.dev/pkg/injection" "knative.dev/pkg/injection/sharedmain" "knative.dev/pkg/webhook/certificates" ) @@ -49,7 +50,7 @@ func newAnnotationDefaultingAdmissionController(ctx context.Context, cmw configm func main() { sharedmain.WebhookMainWithConfig(proxy.Getctx(), "webhook-operator", - sharedmain.ParseAndGetConfigOrDie(), + injection.ParseAndGetRESTConfigOrDie(), certificates.NewController, proxy.NewProxyDefaultingAdmissionController, newAnnotationDefaultingAdmissionController, diff --git a/go.mod b/go.mod index 4d534c3e2a..7996c33c11 100644 --- a/go.mod +++ b/go.mod @@ -2,24 +2,24 @@ module github.com/tektoncd/operator require ( github.com/go-logr/zapr v0.4.0 - 
github.com/google/go-cmp v0.5.5 + github.com/google/go-cmp v0.5.6 github.com/manifestival/client-go-client v0.5.0 github.com/manifestival/controller-runtime-client v0.4.0 github.com/manifestival/manifestival v0.7.0 github.com/markbates/inflect v1.0.4 github.com/tektoncd/plumbing v0.0.0-20210514044347-f8a9689d5bd5 - github.com/tektoncd/triggers v0.14.1 - go.uber.org/zap v1.16.0 - golang.org/x/mod v0.4.1 - gomodules.xyz/jsonpatch/v2 v2.1.0 + github.com/tektoncd/triggers v0.15.1 + go.uber.org/zap v1.18.1 + golang.org/x/mod v0.4.2 + gomodules.xyz/jsonpatch/v2 v2.2.0 gotest.tools v2.2.0+incompatible gotest.tools/v3 v3.0.3 - k8s.io/api v0.19.7 - k8s.io/apiextensions-apiserver v0.19.7 - k8s.io/apimachinery v0.19.7 - k8s.io/client-go v0.19.7 - k8s.io/code-generator v0.19.7 - knative.dev/pkg v0.0.0-20210331065221-952fdd90dbb0 + k8s.io/api v0.20.7 + k8s.io/apiextensions-apiserver v0.20.7 + k8s.io/apimachinery v0.20.7 + k8s.io/client-go v0.20.7 + k8s.io/code-generator v0.20.7 + knative.dev/pkg v0.0.0-20210730172132-bb4aaf09c430 sigs.k8s.io/controller-runtime v0.7.2 ) diff --git a/go.sum b/go.sum index 4ed2a24546..c5324f7268 100644 --- a/go.sum +++ b/go.sum @@ -25,6 +25,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -38,6 +39,8 @@ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= contrib.go.opencensus.io/exporter/prometheus v0.2.1-0.20200609204449-6bcf6f8577f0 h1:2O3c1g5CzMc1+Uah4Waot9Obm0yw70VXJzWaP6Fz3nw= contrib.go.opencensus.io/exporter/prometheus v0.2.1-0.20200609204449-6bcf6f8577f0/go.mod h1:MjHoxkI7Ny27toPeFkRbXbzVjzIGkwOAptrAy8Mxtm8= +contrib.go.opencensus.io/exporter/prometheus v0.3.0 h1:08FMdJYpItzsknogU6PiiNo7XQZg/25GjH236+YCwD0= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= contrib.go.opencensus.io/exporter/stackdriver v0.13.5 h1:TNaexHK16gPUoc7uzELKOU7JULqccn1NDuqUxmxSqfo= contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= contrib.go.opencensus.io/exporter/zipkin v0.1.2/go.mod h1:mP5xM3rrgOjpn79MM8fZbj3gsxcuytSqtH0dxSWW1RE= @@ -49,8 +52,10 @@ github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= 
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= @@ -58,11 +63,13 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= @@ -120,13 +127,18 @@ github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.31.12 h1:SxRRGyhlCagI0DYkhOg+FgdXGXzRTE3vEX/gsgFaiKQ= github.com/aws/aws-sdk-go v1.31.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= 
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= @@ -158,6 +170,7 @@ github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64694/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -213,8 +226,10 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= @@ -222,6 +237,8 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.2.0 h1:8ozOH5xxoMYDt5/u+yMTsVXydVCbTORFnOOoq2lumco= github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0VRxYUc= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -242,6 +259,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt 
v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -303,6 +321,8 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.13 h1:233UVgMy1DlmCYYfOiFpta6e2urloh+sEs5id6lyzog= github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= @@ -313,6 +333,8 @@ github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A= github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= +github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -354,6 +376,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= @@ -383,6 +408,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-containerregistry v0.4.1-0.20210128200529-19c2b639fab1 h1:o2ykCuuhHeUwtzNg89pH2hi+821aqjLWkaREVR3ziTQ= github.com/google/go-containerregistry 
v0.4.1-0.20210128200529-19c2b639fab1/go.mod h1:GU9FUA/X9rd2cV3ZoUNaWihp27tki6/38EsVzL2Dyzc= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20210129212729-5c4818de4025/go.mod h1:n9wRxRfKkHy6ZFyj0jJQHw11P+mGLnED4sqegwrXxDk= @@ -450,7 +477,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4G github.com/grpc-ecosystem/grpc-gateway v1.14.8 h1:hXClj+iFpmLM8i3lkO6i4Psli4P2qObQuQReiII26U8= github.com/grpc-ecosystem/grpc-gateway v1.14.8/go.mod h1:NZE8t6vs6TnwLL/ITkaK8W3ecMLGAbh2jXTclvpiwYo= github.com/h2non/gock v1.0.9/go.mod h1:CZMcB0Lg5IWnr9bF79pPMg9WeV6WumxQiUJ1UvdO1iE= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -511,6 +540,8 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -542,6 +573,7 @@ github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac h1:+2b6i github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -551,6 +583,8 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.1-0.20191009090205-6c0755d89d1e/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifestival/client-go-client v0.5.0 
h1:LZUidASM6rwTEI40wtxYDKi+VHhDRnYT4xuYuLjExp4= github.com/manifestival/client-go-client v0.5.0/go.mod h1:sDehep6aHdIEdUKnRSvueGf2TbQfd653Sn2picTeQqM= github.com/manifestival/controller-runtime-client v0.4.0 h1:AK+nTZ1rhPTfAc3upnSe3TiOvAWuRmxi1vjEr55dSUM= @@ -674,9 +708,12 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -690,10 +727,13 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.19.0 h1:Itb4+NjG9wRdkAWgVucbM/adyIXxEhbw0866e0uZE6A= github.com/prometheus/common v0.19.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -702,11 +742,16 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/statsd_exporter v0.15.0 h1:UiwC1L5HkxEPeapXdm2Ye0u1vUJfTj7uwT5yydYpa1E= github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7WL1zHKK7WMqFQqu72rw= +github.com/prometheus/statsd_exporter v0.20.0 h1:M0hQphnq2WyWKS5CefQL8PqWwBOBPhiAkyLo5l4ZYvE= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190706150252-9beb055b7962/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -742,6 +787,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -750,6 +796,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -766,19 +813,24 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tektoncd/pipeline v0.24.1 h1:c+Odgmf8dkUVRdQ5NVbOliwFB8608kM9i+18t5QDO+g= github.com/tektoncd/pipeline v0.24.1/go.mod h1:ChFD/vfu14VOtCVlLWdtlvOwXfBfVotULoNV6yz+CKY= github.com/tektoncd/plumbing v0.0.0-20210420200944-17170d5e7bc9/go.mod h1:WTWwsg91xgm+jPOKoyKVK/yRYxnVDlUYeDlypB1lDdQ= github.com/tektoncd/plumbing v0.0.0-20210514044347-f8a9689d5bd5 h1:tY3t38AFNwlSWALhulEHryANpQ53Hfjp9jM5zl8ImSQ= 
github.com/tektoncd/plumbing v0.0.0-20210514044347-f8a9689d5bd5/go.mod h1:WTWwsg91xgm+jPOKoyKVK/yRYxnVDlUYeDlypB1lDdQ= -github.com/tektoncd/triggers v0.14.1 h1:DxWR37DJuAVtnGGBYgp/jGeyH1WhnKkx74tzl+fO7SI= -github.com/tektoncd/triggers v0.14.1/go.mod h1:JajnZFFjm5t2ZlHR5TtsERuWkbhAvIOK5TMjyUemJbw= -github.com/tidwall/gjson v1.3.5 h1:2oW9FBNu8qt9jy5URgrzsVx/T/KSn3qn/smJQ0crlDQ= -github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tektoncd/triggers v0.15.1 h1:zTIwxvu8p9unajsZKF8KNOtN/hbSLR6GRWeDuhEo1Ic= +github.com/tektoncd/triggers v0.15.1/go.mod h1:QZIUVMUVNkeF41U3GryjUQ1nX3H9q/zVt+bWbm80iyM= +github.com/tidwall/gjson v1.6.5 h1:P/K9r+1pt9AK54uap7HcoIp6T3a7AoMg3v18tUis+Cg= +github.com/tidwall/gjson v1.6.5/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= +github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU= +github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -803,11 +855,13 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -828,6 +882,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.8.0 h1:CUhrE4N1rqSE6FM9ecihEjRkLQu8cDfgDyoOs83mEY4= +go.uber.org/atomic v1.8.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= @@ -836,6 +892,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -844,6 +902,8 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -866,6 +926,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -891,6 +953,8 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= @@ -902,6 +966,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -954,6 +1020,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -965,6 +1036,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013 h1:55H5j7lotzuFCEOKDsMch+fRNUQ9DgtyHOUP31FNqKc= golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -976,6 +1049,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1026,6 +1101,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1044,6 +1120,13 @@ golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -1058,6 +1141,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1067,6 +1152,8 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1100,6 +1187,7 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1138,6 +1226,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.4 h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1145,6 +1235,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod 
h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -1218,9 +1310,12 @@ google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de h1:+nG/xknR+Gc5ByHOtK1dT0Pl3LYo8NLR+Jz3XeBeGEg= +google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1242,6 +1337,9 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1253,6 +1351,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1271,6 +1372,7 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= @@ -1320,27 +1422,40 @@ k8s.io/api v0.15.7/go.mod h1:a/tUxscL+UxvYyA7Tj5DRc8ivYqJIO1Y5KDdlI6wSvo= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api v0.19.7 h1:MpHhls03C2pyzoYcpbe4QqYiiZjdvW+tuWq6TbjV14Y= k8s.io/api v0.19.7/go.mod h1:KTryDUT3l6Mtv7K2J2486PNL9DBns3wOYTkGR+iz63Y= +k8s.io/api v0.20.7 h1:wOEPJ3NoimUfR9v9sAO2JosPiEP9IGFNplf7zZvYzPU= +k8s.io/api v0.20.7/go.mod h1:4x0yErUkcEWYG+O0S4QdrYa2+PLEeY2M7aeQe++2nmk= k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= k8s.io/apiextensions-apiserver v0.19.7 h1:aV9DANMSCCYBEMbtoT/5oesrtcciQrjy9yqWVtZZL5A= k8s.io/apiextensions-apiserver v0.19.7/go.mod h1:XJNNtjISNNePDEUClHt/igzMpQcmjVVh88QH+PKztPU= +k8s.io/apiextensions-apiserver v0.20.7 h1:bAFRPfYpUvs7U2+SdCZuNFThZm8ZbQhUpEKMe1H2BYw= +k8s.io/apiextensions-apiserver v0.20.7/go.mod h1:rBGJeRYoDJi1jJFHPA4QWXV6YX/5scZfSdkuMSgWoyA= k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76/go.mod h1:M2fZgZL9DbLfeJaPBCDqSqNsdsmLN+V29knYJnIXlMA= k8s.io/apimachinery v0.15.7/go.mod h1:Xc10RHc1U+F/e9GCloJ8QAeCGevSVP5xhOhqlE+e1kM= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.7 h1:nTaEnYVH+i//aPgMA0zTEV2lfVLCV9LextqVd67mulc= k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= +k8s.io/apimachinery v0.20.7 h1:tBfhql7OggSCahvASeEpLRzvxc7FK77wNivi1uXCQWM= +k8s.io/apimachinery v0.20.7/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.19.7/go.mod h1:DmWVQggNePspa+vSsVytVbS3iBSDTXdJVt0akfHacKk= +k8s.io/apiserver v0.20.7/go.mod h1:7gbB7UjDdP1/epYBGnIUE6jWY4Wpz99cZ7igfDa9rv4= k8s.io/client-go v0.15.7/go.mod h1:QMNB76d3lKPvPQdOOnnxUF693C3hnCzUbC2umg70pWA= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/client-go v0.19.7 h1:SoJ4mzZ9LyXBGDe8MmpMznw0CwQ1ITWgsmG7GixvhUU= k8s.io/client-go v0.19.7/go.mod h1:iytGI7S3kmv6bWnn+bSQUE4VlrEi4YFssvVB7J7Hvqg= +k8s.io/client-go v0.20.7 h1:Ot22456XfYAWrCWddw/quevMrFHqP7s1qT499FoumVU= +k8s.io/client-go v0.20.7/go.mod h1:uGl3qh/Jy3cTF1nDoIKBqUZlRWnj/EM+/leAXETKRuA= k8s.io/cloud-provider v0.19.7/go.mod h1:aO/VpUwkG+JQN7ZXc5WBLZ5NBXuq/Y5B6vri6U94PZ8= k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.7 h1:kM/68Y26Z/u//TFc1ggVVcg62te8A2yQh57jBfD0FWQ= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/code-generator v0.20.7 h1:iXz1ME6EQqoCkLefa7bcniKHu0SzgbxsFV1RlBcfypc= +k8s.io/code-generator v0.20.7/go.mod h1:i6FmG+QxaLxvJsezvZp0q/gAEzzOz3U53KFibghWToU= k8s.io/component-base v0.19.2/go.mod 
h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= k8s.io/component-base v0.19.7 h1:ZXS2VRWOWBOc2fTd1zjzhi/b/mkqFT9FDqiNsn1cH30= k8s.io/component-base v0.19.7/go.mod h1:YX8spPBgwl3I6UGcSdQiEMAqRMSUsGQOW7SEr4+Qa3U= +k8s.io/component-base v0.20.7 h1:TdRMMGxxxhcArvkem+FVqBljPOczs9j+tVGpYRM6TM8= +k8s.io/component-base v0.20.7/go.mod h1:878UWprXC07P2CWFg+jjvTfxJSlkHp1v2m1MTkNQnJY= k8s.io/csi-translation-lib v0.19.7/go.mod h1:WghizPQuzuygr2WdpgN2EjcNpDD2V4EAbxFXsgHgSBk= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1348,6 +1463,8 @@ k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210203185629-de9496dff47b h1:bAU8IlrMA6KbP0dIg/sVSJn95pDCUHDZx0DpTGrf2v4= +k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= @@ -1360,28 +1477,36 @@ k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20200204173128-addea2498afe/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f h1:ZcWbnalfwIst131Zff7dGd1HQdt+NA9q7z9Fi0vbsHY= k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/legacy-cloud-providers v0.19.7/go.mod h1:dsZk4gH9QIwAtHQ8CK0Ps257xlfgoXE3tMkMNhW2xDU= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= knative.dev/hack v0.0.0-20210325223819-b6ab329907d3/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20210622141627-e28525d8d260/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/pkg v0.0.0-20210331065221-952fdd90dbb0 h1:z05hcB4br0qz7JdwIoUSTXLTF+7ThuJ+R6NFfXd1Y4Q= knative.dev/pkg v0.0.0-20210331065221-952fdd90dbb0/go.mod h1:PD5g8hUCXq6iR3tILjmZeJBvQfXGnHMPKryq54qHJhg= +knative.dev/pkg v0.0.0-20210730172132-bb4aaf09c430 h1:1ZIFVpdWK7fyXVMsCz7Ko/vfsE3p9WI3oB20Uz0AulY= +knative.dev/pkg 
v0.0.0-20210730172132-bb4aaf09c430/go.mod h1:NYZRIPU+Pv39VfbZV1BtMIe4kCavNle1udsPrvOLm+Y= pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.7.2 h1:gD2JZp0bBLLuvSRYVNvox+bRCz1UUUxKDjPUCb56Ukk= sigs.k8s.io/controller-runtime v0.7.2/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/update-deps.sh b/hack/update-deps.sh index c8ab37ed21..4e3fd6e19a 100755 --- a/hack/update-deps.sh +++ b/hack/update-deps.sh @@ -23,9 +23,9 @@ source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scri cd ${REPO_ROOT_DIR} VERSION="release-0.22" -K8S_VERSION="v0.19.7" -TRIGGERS_VERSION="v0.14.1" -PIPELINE_VERSION="v0.24.1" +K8S_VERSION="v0.20.7" +TRIGGERS_VERSION="v0.15.0" +PIPELINE_VERSION="v0.27.1" # The list of dependencies that we track at HEAD and periodically # float forward in this repository. 
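For reference, the pins bumped above (K8S_VERSION=v0.20.7, TRIGGERS_VERSION=v0.15.0, PIPELINE_VERSION=v0.27.1) are what the plumbing update-deps script floats into go.mod and the vendor tree; the go.sum additions earlier in this diff are the result. A minimal manual equivalent, assuming standard Go module tooling rather than the script's exact invocation (the module paths below are the usual k8s.io and tektoncd ones, not taken verbatim from this diff), would look roughly like:

    # Hypothetical manual equivalent of the version bump above; the plumbing
    # script's actual flags and helper functions may differ.
    go get k8s.io/api@v0.20.7 k8s.io/apimachinery@v0.20.7 k8s.io/client-go@v0.20.7
    go get github.com/tektoncd/pipeline@v0.27.1 github.com/tektoncd/triggers@v0.15.0
    go mod tidy
    go mod vendor

Running this regenerates go.sum entries like the k8s.io/*@v0.20.7 lines above and refreshes vendored packages, which is why the bulk of this diff is mechanical.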
diff --git a/pkg/client/injection/informers/factory/filtered/fake/fake_filtered_factory.go b/pkg/client/injection/informers/factory/filtered/fake/fake_filtered_factory.go index 4cb6e7a2e8..15679be3e3 100644 --- a/pkg/client/injection/informers/factory/filtered/fake/fake_filtered_factory.go +++ b/pkg/client/injection/informers/factory/filtered/fake/fake_filtered_factory.go @@ -38,10 +38,6 @@ func init() { func withInformerFactory(ctx context.Context) context.Context { c := fake.Get(ctx) - opts := []externalversions.SharedInformerOption{} - if injection.HasNamespaceScope(ctx) { - opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) - } untyped := ctx.Value(filtered.LabelKey{}) if untyped == nil { logging.FromContext(ctx).Panic( @@ -49,11 +45,15 @@ func withInformerFactory(ctx context.Context) context.Context { } labelSelectors := untyped.([]string) for _, selector := range labelSelectors { - thisOpts := append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) { + opts := []externalversions.SharedInformerOption{} + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) { l.LabelSelector = selector })) ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, - externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), thisOpts...)) + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) } return ctx } diff --git a/pkg/client/injection/informers/factory/filtered/filtered_factory.go b/pkg/client/injection/informers/factory/filtered/filtered_factory.go index 84ebb49dff..cb5af385ee 100644 --- a/pkg/client/injection/informers/factory/filtered/filtered_factory.go +++ b/pkg/client/injection/informers/factory/filtered/filtered_factory.go @@ -46,10 +46,6 @@ func WithSelectors(ctx context.Context, selector ...string) context.Context { func withInformerFactory(ctx context.Context) context.Context { c := client.Get(ctx) - opts := []externalversions.SharedInformerOption{} - if injection.HasNamespaceScope(ctx) { - opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) - } untyped := ctx.Value(LabelKey{}) if untyped == nil { logging.FromContext(ctx).Panic( @@ -57,11 +53,15 @@ func withInformerFactory(ctx context.Context) context.Context { } labelSelectors := untyped.([]string) for _, selector := range labelSelectors { - thisOpts := append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) { + opts := []externalversions.SharedInformerOption{} + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) { l.LabelSelector = selector })) ctx = context.WithValue(ctx, Key{Selector: selector}, - externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), thisOpts...)) + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) } return ctx } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/controller.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/controller.go index cdac800509..ec92b90371 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/controller.go +++ 
b/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/controller.go @@ -63,6 +63,8 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF lister := tektonaddonInformer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { @@ -71,7 +73,11 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -116,6 +122,9 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/reconciler.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/reconciler.go index c6f3070351..7d3df3d219 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/reconciler.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/reconciler.go @@ -22,7 +22,6 @@ import ( context "context" json "encoding/json" fmt "fmt" - reflect "reflect" v1alpha1 "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" versioned "github.com/tektoncd/operator/pkg/client/clientset/versioned" @@ -88,13 +87,13 @@ type doReconcile func(ctx context.Context, o *v1alpha1.TektonAddon) reconciler.E // reconcilerImpl implements controller.Reconciler for v1alpha1.TektonAddon resources. type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client versioned.Interface - // Listers index properties about resources + // Listers index properties about resources. Lister operatorv1alpha1.TektonAddonLister // Recorder is an event recorder for recording Event resources to the @@ -116,7 +115,7 @@ type reconcilerImpl struct { skipStatusUpdates bool } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. @@ -212,8 +211,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { original, err := getter.Get(s.name) if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. 
logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -282,7 +288,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -291,8 +297,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -314,7 +326,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Te } // If there's nothing to update, just return. - if reflect.DeepEqual(existing.Status, desired.Status) { + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/state.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/state.go index 6469e4b30a..1aed67b7cf 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/state.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonaddon/state.go @@ -29,28 +29,28 @@ import ( // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. + // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. namespace string - // Namespace is the name split from the reconciliation key. + // name is the name split from the reconciliation key. name string // reconciler is the reconciler. reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. 
isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return nil, fmt.Errorf("invalid resource key: %s", key) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/controller.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/controller.go index 7469314c4b..a50695cc72 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/controller.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/controller.go @@ -63,6 +63,8 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF lister := tektonconfigInformer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { @@ -71,7 +73,11 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -116,6 +122,9 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/reconciler.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/reconciler.go index 27ad4058fd..1136a3cced 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/reconciler.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/reconciler.go @@ -22,7 +22,6 @@ import ( context "context" json "encoding/json" fmt "fmt" - reflect "reflect" v1alpha1 "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" versioned "github.com/tektoncd/operator/pkg/client/clientset/versioned" @@ -88,13 +87,13 @@ type doReconcile func(ctx context.Context, o *v1alpha1.TektonConfig) reconciler. // reconcilerImpl implements controller.Reconciler for v1alpha1.TektonConfig resources. type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client versioned.Interface - // Listers index properties about resources + // Listers index properties about resources. Lister operatorv1alpha1.TektonConfigLister // Recorder is an event recorder for recording Event resources to the @@ -116,7 +115,7 @@ type reconcilerImpl struct { skipStatusUpdates bool } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. 
@@ -212,8 +211,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { original, err := getter.Get(s.name) if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -282,7 +288,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -291,8 +297,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -314,7 +326,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Te } // If there's nothing to update, just return. - if reflect.DeepEqual(existing.Status, desired.Status) { + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/state.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/state.go index ac3e3e68a1..74f65dd2f2 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/state.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonconfig/state.go @@ -29,28 +29,28 @@ import ( // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. + // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. namespace string - // Namespace is the name split from the reconciliation key. + // name is the name split from the reconciliation key. name string // reconciler is the reconciler. reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. 
rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return nil, fmt.Errorf("invalid resource key: %s", key) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/controller.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/controller.go index aa8369a068..fa4fe44036 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/controller.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/controller.go @@ -63,6 +63,8 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF lister := tektondashboardInformer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { @@ -71,7 +73,11 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -116,6 +122,9 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/reconciler.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/reconciler.go index 7280180041..429a5d6e93 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/reconciler.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/reconciler.go @@ -22,7 +22,6 @@ import ( context "context" json "encoding/json" fmt "fmt" - reflect "reflect" v1alpha1 "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" versioned "github.com/tektoncd/operator/pkg/client/clientset/versioned" @@ -88,13 +87,13 @@ type doReconcile func(ctx context.Context, o *v1alpha1.TektonDashboard) reconcil // reconcilerImpl implements controller.Reconciler for v1alpha1.TektonDashboard resources. type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client versioned.Interface - // Listers index properties about resources + // Listers index properties about resources. 
Lister operatorv1alpha1.TektonDashboardLister // Recorder is an event recorder for recording Event resources to the @@ -116,7 +115,7 @@ type reconcilerImpl struct { skipStatusUpdates bool } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. @@ -212,8 +211,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { original, err := getter.Get(s.name) if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -282,7 +288,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -291,8 +297,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -314,7 +326,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Te } // If there's nothing to update, just return. - if reflect.DeepEqual(existing.Status, desired.Status) { + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/state.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/state.go index 34c7e1a4cd..a73c902a94 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/state.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektondashboard/state.go @@ -29,28 +29,28 @@ import ( // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. + // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. namespace string - // Namespace is the name split from the reconciliation key. + // name is the name split from the reconciliation key. name string // reconciler is the reconciler. 
reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return nil, fmt.Errorf("invalid resource key: %s", key) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/controller.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/controller.go index 3286a70462..fc93109ffe 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/controller.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/controller.go @@ -63,6 +63,8 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF lister := tektonpipelineInformer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { @@ -71,7 +73,11 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -116,6 +122,9 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/reconciler.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/reconciler.go index 11fea65192..0c835294fd 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/reconciler.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/reconciler.go @@ -22,7 +22,6 @@ import ( context "context" json "encoding/json" fmt "fmt" - reflect "reflect" v1alpha1 "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" versioned "github.com/tektoncd/operator/pkg/client/clientset/versioned" @@ -88,13 +87,13 @@ type doReconcile func(ctx context.Context, o *v1alpha1.TektonPipeline) reconcile // reconcilerImpl implements controller.Reconciler for v1alpha1.TektonPipeline resources. type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. 
Client versioned.Interface - // Listers index properties about resources + // Listers index properties about resources. Lister operatorv1alpha1.TektonPipelineLister // Recorder is an event recorder for recording Event resources to the @@ -116,7 +115,7 @@ type reconcilerImpl struct { skipStatusUpdates bool } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. @@ -212,8 +211,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { original, err := getter.Get(s.name) if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -282,7 +288,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -291,8 +297,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -314,7 +326,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Te } // If there's nothing to update, just return. - if reflect.DeepEqual(existing.Status, desired.Status) { + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/state.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/state.go index 706e9f6c9b..44c44a40b6 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/state.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonpipeline/state.go @@ -29,28 +29,28 @@ import ( // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. + // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. namespace string - // Namespace is the name split from the reconciliation key. 
+ // name is the name split from the reconciliation key. name string // reconciler is the reconciler. reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return nil, fmt.Errorf("invalid resource key: %s", key) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/controller.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/controller.go index a7530f2292..0b6965c365 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/controller.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/controller.go @@ -63,6 +63,8 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF lister := tektonresultInformer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { @@ -71,7 +73,11 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -116,6 +122,9 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/reconciler.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/reconciler.go index 82790d1b00..89463c0fba 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/reconciler.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/reconciler.go @@ -22,7 +22,6 @@ import ( context "context" json "encoding/json" fmt "fmt" - reflect "reflect" v1alpha1 "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" versioned "github.com/tektoncd/operator/pkg/client/clientset/versioned" @@ -88,13 +87,13 @@ type doReconcile func(ctx context.Context, o *v1alpha1.TektonResult) reconciler. // reconcilerImpl implements controller.Reconciler for v1alpha1.TektonResult resources. type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. 
reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client versioned.Interface - // Listers index properties about resources + // Listers index properties about resources. Lister operatorv1alpha1.TektonResultLister // Recorder is an event recorder for recording Event resources to the @@ -116,7 +115,7 @@ type reconcilerImpl struct { skipStatusUpdates bool } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. @@ -212,8 +211,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { original, err := getter.Get(s.name) if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -282,7 +288,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -291,8 +297,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -314,7 +326,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Te } // If there's nothing to update, just return. - if reflect.DeepEqual(existing.Status, desired.Status) { + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/state.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/state.go index 0849f49a5b..a2e0372c9d 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/state.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektonresult/state.go @@ -29,28 +29,28 @@ import ( // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. + // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. 
namespace string - // Namespace is the name split from the reconciliation key. + // name is the name split from the reconciliation key. name string // reconciler is the reconciler. reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return nil, fmt.Errorf("invalid resource key: %s", key) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/controller.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/controller.go index c7e9096110..3d89a5100a 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/controller.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/controller.go @@ -63,6 +63,8 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF lister := tektontriggerInformer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { @@ -71,7 +73,11 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -116,6 +122,9 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/reconciler.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/reconciler.go index 013eaceb52..9ddf0f97a0 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/reconciler.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/reconciler.go @@ -22,7 +22,6 @@ import ( context "context" json "encoding/json" fmt "fmt" - reflect "reflect" v1alpha1 "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" versioned "github.com/tektoncd/operator/pkg/client/clientset/versioned" @@ -88,13 +87,13 @@ type doReconcile func(ctx context.Context, o *v1alpha1.TektonTrigger) reconciler // reconcilerImpl implements controller.Reconciler for v1alpha1.TektonTrigger resources. 
type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client versioned.Interface - // Listers index properties about resources + // Listers index properties about resources. Lister operatorv1alpha1.TektonTriggerLister // Recorder is an event recorder for recording Event resources to the @@ -116,7 +115,7 @@ type reconcilerImpl struct { skipStatusUpdates bool } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. @@ -212,8 +211,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { original, err := getter.Get(s.name) if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -282,7 +288,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -291,8 +297,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -314,7 +326,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Te } // If there's nothing to update, just return. - if reflect.DeepEqual(existing.Status, desired.Status) { + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/state.go b/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/state.go index ea256befb3..259e168745 100644 --- a/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/state.go +++ b/pkg/client/injection/reconciler/operator/v1alpha1/tektontrigger/state.go @@ -29,28 +29,28 @@ import ( // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. 
+ // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. namespace string - // Namespace is the name split from the reconciliation key. + // name is the name split from the reconciliation key. name string // reconciler is the reconciler. reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return nil, fmt.Errorf("invalid resource key: %s", key) diff --git a/pkg/reconciler/openshift/annotation/annotation.go b/pkg/reconciler/openshift/annotation/annotation.go index 448c276459..4b5b9db30e 100644 --- a/pkg/reconciler/openshift/annotation/annotation.go +++ b/pkg/reconciler/openshift/annotation/annotation.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/markbates/inflect" - "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" + "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1" "go.uber.org/zap" "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" @@ -142,7 +142,7 @@ func (ac *reconciler) reconcileMutatingWebhook(ctx context.Context, caCert []byt }, Rule: admissionregistrationv1.Rule{ APIGroups: []string{"triggers.tekton.dev"}, - APIVersions: []string{"v1alpha1"}, + APIVersions: []string{"v1alpha1", "v1beta1"}, Resources: []string{pluralEL, pluralEL + "/status"}, }, }, @@ -209,13 +209,13 @@ func (ac *reconciler) mutate(ctx context.Context, req *admissionv1.AdmissionRequ } logger := logging.FromContext(ctx) - if gvk.Group != "triggers.tekton.dev" || gvk.Version != "v1alpha1" || gvk.Kind != "EventListener" { + if gvk.Group != "triggers.tekton.dev" || !(gvk.Version == "v1alpha1" || gvk.Version == "v1beta1") || gvk.Kind != "EventListener" { logger.Error("Unhandled kind: ", gvk) return nil, fmt.Errorf("unhandled kind: %v", gvk) } // nil values denote absence of `old` (create) or `new` (delete) objects. - var oldObj, newObj v1alpha1.EventListener + var oldObj, newObj v1beta1.EventListener if len(newBytes) != 0 { newDecoder := json.NewDecoder(bytes.NewBuffer(newBytes)) @@ -281,7 +281,7 @@ func roundTripPatch(bytes []byte, unmarshalled interface{}) (duck.JSONPatch, err } // setDefaults simply leverages apis.Defaultable to set defaults. 
-func setDefaults(ctx context.Context, patches duck.JSONPatch, el v1alpha1.EventListener) (duck.JSONPatch, error) { +func setDefaults(ctx context.Context, patches duck.JSONPatch, el v1beta1.EventListener) (duck.JSONPatch, error) { before, after := el.DeepCopyObject(), el secretName := "el-" + el.Name @@ -289,8 +289,8 @@ func setDefaults(ctx context.Context, patches duck.JSONPatch, el v1alpha1.EventL "service.beta.openshift.io/serving-cert-secret-name": secretName, } if after.Spec.Resources.KubernetesResource == nil { - after.Spec.Resources.KubernetesResource = &v1alpha1.KubernetesResource{} - after.Spec.Resources.KubernetesResource = &v1alpha1.KubernetesResource{ + after.Spec.Resources.KubernetesResource = &v1beta1.KubernetesResource{} + after.Spec.Resources.KubernetesResource = &v1beta1.KubernetesResource{ WithPodSpec: duckv1.WithPodSpec{ Template: duckv1.PodSpecable{ Spec: corev1.PodSpec{ diff --git a/third_party/cloud.google.com/go/LICENSE b/third_party/cloud.google.com/go/compute/metadata/LICENSE similarity index 100% rename from third_party/cloud.google.com/go/LICENSE rename to third_party/cloud.google.com/go/compute/metadata/LICENSE diff --git a/third_party/contrib.go.opencensus.io/exporter/stackdriver/LICENSE b/third_party/contrib.go.opencensus.io/exporter/stackdriver/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/third_party/contrib.go.opencensus.io/exporter/stackdriver/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/third_party/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE.txt b/third_party/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE.txt deleted file mode 100644 index 2042d1bda6..0000000000 --- a/third_party/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE.txt +++ /dev/null @@ -1,52 +0,0 @@ -[The "BSD 3-clause license"] -Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -===== - -MIT License for codepointat.js from https://git.io/codepointat -MIT License for fromcodepoint.js from https://git.io/vDW1m - -Copyright Mathias Bynens - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/github.com/aws/aws-sdk-go/LICENSE.txt b/third_party/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/third_party/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/third_party/github.com/aws/aws-sdk-go/NOTICE.txt b/third_party/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129ecc4..0000000000 --- a/third_party/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. 
diff --git a/third_party/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/third_party/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/third_party/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/github.com/google/cel-go/LICENSE b/third_party/github.com/google/cel-go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/third_party/github.com/google/cel-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/third_party/github.com/googleapis/gax-go/v2/LICENSE b/third_party/github.com/googleapis/gax-go/v2/LICENSE deleted file mode 100644 index 6d16b6578a..0000000000 --- a/third_party/github.com/googleapis/gax-go/v2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2016, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index a0b97e3f77..92d70934d6 100644 --- a/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -34,6 +34,6 @@ type LRUCache interface { // Clears all cache entries. Purge() - // Resizes cache, returning number evicted - Resize(int) int + // Resizes cache, returning number evicted + Resize(int) int } diff --git a/third_party/github.com/stoewer/go-strcase/LICENSE b/third_party/github.com/stoewer/go-strcase/LICENSE deleted file mode 100644 index a105a3819a..0000000000 --- a/third_party/github.com/stoewer/go-strcase/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017, Adrian Stoewer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/third_party/google.golang.org/api/LICENSE b/third_party/google.golang.org/api/support/bundler/LICENSE similarity index 100% rename from third_party/google.golang.org/api/LICENSE rename to third_party/google.golang.org/api/support/bundler/LICENSE diff --git a/third_party/gopkg.in/evanphx/json-patch.v4/LICENSE b/third_party/gopkg.in/evanphx/json-patch.v4/LICENSE deleted file mode 100644 index df76d7d771..0000000000 --- a/third_party/gopkg.in/evanphx/json-patch.v4/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
-* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go deleted file mode 100644 index 5949cc99dc..0000000000 --- a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go +++ /dev/null @@ -1,1064 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package container - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - gtransport "google.golang.org/api/transport/grpc" - containerpb "google.golang.org/genproto/googleapis/container/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newClusterManagerClientHook clientHook - -// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient. 
-type ClusterManagerCallOptions struct { - ListClusters []gax.CallOption - GetCluster []gax.CallOption - CreateCluster []gax.CallOption - UpdateCluster []gax.CallOption - UpdateNodePool []gax.CallOption - SetNodePoolAutoscaling []gax.CallOption - SetLoggingService []gax.CallOption - SetMonitoringService []gax.CallOption - SetAddonsConfig []gax.CallOption - SetLocations []gax.CallOption - UpdateMaster []gax.CallOption - SetMasterAuth []gax.CallOption - DeleteCluster []gax.CallOption - ListOperations []gax.CallOption - GetOperation []gax.CallOption - CancelOperation []gax.CallOption - GetServerConfig []gax.CallOption - ListNodePools []gax.CallOption - GetNodePool []gax.CallOption - CreateNodePool []gax.CallOption - DeleteNodePool []gax.CallOption - RollbackNodePoolUpgrade []gax.CallOption - SetNodePoolManagement []gax.CallOption - SetLabels []gax.CallOption - SetLegacyAbac []gax.CallOption - StartIPRotation []gax.CallOption - CompleteIPRotation []gax.CallOption - SetNodePoolSize []gax.CallOption - SetNetworkPolicy []gax.CallOption - SetMaintenancePolicy []gax.CallOption - ListUsableSubnetworks []gax.CallOption -} - -func defaultClusterManagerClientOptions() []option.ClientOption { - return []option.ClientOption{ - internaloption.WithDefaultEndpoint("container.googleapis.com:443"), - internaloption.WithDefaultMTLSEndpoint("container.mtls.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultClusterManagerCallOptions() *ClusterManagerCallOptions { - return &ClusterManagerCallOptions{ - ListClusters: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetCluster: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateCluster: []gax.CallOption{}, - UpdateCluster: []gax.CallOption{}, - UpdateNodePool: []gax.CallOption{}, - SetNodePoolAutoscaling: []gax.CallOption{}, - SetLoggingService: []gax.CallOption{}, - SetMonitoringService: []gax.CallOption{}, - SetAddonsConfig: []gax.CallOption{}, - SetLocations: []gax.CallOption{}, - UpdateMaster: []gax.CallOption{}, - SetMasterAuth: []gax.CallOption{}, - DeleteCluster: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListOperations: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetOperation: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CancelOperation: []gax.CallOption{}, - GetServerConfig: []gax.CallOption{ - 
gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListNodePools: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetNodePool: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateNodePool: []gax.CallOption{}, - DeleteNodePool: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - RollbackNodePoolUpgrade: []gax.CallOption{}, - SetNodePoolManagement: []gax.CallOption{}, - SetLabels: []gax.CallOption{}, - SetLegacyAbac: []gax.CallOption{}, - StartIPRotation: []gax.CallOption{}, - CompleteIPRotation: []gax.CallOption{}, - SetNodePoolSize: []gax.CallOption{}, - SetNetworkPolicy: []gax.CallOption{}, - SetMaintenancePolicy: []gax.CallOption{}, - ListUsableSubnetworks: []gax.CallOption{}, - } -} - -// ClusterManagerClient is a client for interacting with Kubernetes Engine API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type ClusterManagerClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - - // The gRPC API client. - clusterManagerClient containerpb.ClusterManagerClient - - // The call options for this service. - CallOptions *ClusterManagerCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewClusterManagerClient creates a new cluster manager client. -// -// Google Kubernetes Engine Cluster Manager v1 -func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) { - clientOpts := defaultClusterManagerClientOptions() - - if newClusterManagerClientHook != nil { - hookOpts, err := newClusterManagerClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &ClusterManagerClient{ - connPool: connPool, - disableDeadlines: disableDeadlines, - CallOptions: defaultClusterManagerCallOptions(), - - clusterManagerClient: containerpb.NewClusterManagerClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *ClusterManagerClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. 
-func (c *ClusterManagerClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// ListClusters lists all clusters owned by a project in either the specified zone or all -// zones. -func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v", "parent", url.QueryEscape(req.GetParent()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...) - var resp *containerpb.ListClustersResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetCluster gets the details of a specific cluster. -func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...) - var resp *containerpb.Cluster - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CreateCluster creates a cluster, consisting of the specified number and type of Google -// Compute Engine instances. -// -// By default, the cluster is created in the project’s -// default network (at https://cloud.google.com/compute/docs/networks-and-firewalls#networks). -// -// One firewall is added for the cluster. After cluster creation, -// the Kubelet creates routes for each node to allow the containers -// on that node to communicate with all other instances in the -// cluster. -// -// Finally, an entry is added to the project’s global metadata indicating -// which CIDR range the cluster is using. 
-func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v", "parent", url.QueryEscape(req.GetParent()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// UpdateCluster updates the settings of a specific cluster. -func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// UpdateNodePool updates the version and/or image type for the specified node pool. -func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetNodePoolAutoscaling sets the autoscaling settings for the specified node pool. 
-func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetLoggingService sets the logging service for a specific cluster. -func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetMonitoringService sets the monitoring service for a specific cluster. -func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -// SetAddonsConfig sets the addons for a specific cluster. -func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetLocations sets the locations for a specific cluster. -func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// UpdateMaster updates the master for a specific cluster. -func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetMasterAuth sets master auth materials. 
Currently supports changing the admin password -// or a specific cluster, either via password generation or explicitly setting -// the password. -func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker -// nodes. -// -// Firewalls and routes that were configured during cluster creation -// are also deleted. -// -// Other Google Compute Engine resources that might be in use by the cluster, -// such as load balancer resources, are not deleted if they weren’t present -// when the cluster was initially created. -func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// ListOperations lists all operations in a project in a specific zone or all zones. -func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v", "parent", url.QueryEscape(req.GetParent()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) 
- var resp *containerpb.ListOperationsResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetOperation gets the specified operation. -func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "operation_id", url.QueryEscape(req.GetOperationId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CancelOperation cancels the specified operation. -func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "operation_id", url.QueryEscape(req.GetOperationId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// GetServerConfig returns configuration info about the Google Kubernetes Engine service. -func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...) - var resp *containerpb.ServerConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -// ListNodePools lists the node pools for a cluster. -func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "parent", url.QueryEscape(req.GetParent()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...) - var resp *containerpb.ListNodePoolsResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetNodePool retrieves the requested node pool. -func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...) - var resp *containerpb.NodePool - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CreateNodePool creates a node pool for a cluster. -func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "parent", url.QueryEscape(req.GetParent()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteNodePool deletes a node pool from a cluster. -func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 20000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// RollbackNodePoolUpgrade rolls back a previously Aborted or Failed NodePool upgrade. -// This makes no changes if the last upgrade successfully completed. -func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetNodePoolManagement sets the NodeManagement options for a node pool. -func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...) 
- var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetLabels sets labels on a cluster. -func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster. -func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// StartIPRotation starts master IP rotation. -func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...) 
- var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CompleteIPRotation completes master IP rotation. -func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetNodePoolSize sets the size for a specific node pool. -func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()), "node_pool_id", url.QueryEscape(req.GetNodePoolId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetNetworkPolicy enables or disables Network Policy for a cluster. -func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...) 
- var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetMaintenancePolicy sets the maintenance policy for a cluster. -func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 45000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v&%s=%v", "name", url.QueryEscape(req.GetName()), "project_id", url.QueryEscape(req.GetProjectId()), "zone", url.QueryEscape(req.GetZone()), "cluster_id", url.QueryEscape(req.GetClusterId()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...) - var resp *containerpb.Operation - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// ListUsableSubnetworks lists subnetworks that are usable for creating clusters in a project. -func (c *ClusterManagerClient) ListUsableSubnetworks(ctx context.Context, req *containerpb.ListUsableSubnetworksRequest, opts ...gax.CallOption) *UsableSubnetworkIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListUsableSubnetworks[0:len(c.CallOptions.ListUsableSubnetworks):len(c.CallOptions.ListUsableSubnetworks)], opts...) - it := &UsableSubnetworkIterator{} - req = proto.Clone(req).(*containerpb.ListUsableSubnetworksRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*containerpb.UsableSubnetwork, string, error) { - var resp *containerpb.ListUsableSubnetworksResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.clusterManagerClient.ListUsableSubnetworks(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetSubnetworks(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - return it -} - -// UsableSubnetworkIterator manages a stream of *containerpb.UsableSubnetwork. -type UsableSubnetworkIterator struct { - items []*containerpb.UsableSubnetwork - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. 
- // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*containerpb.UsableSubnetwork, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *UsableSubnetworkIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *UsableSubnetworkIterator) Next() (*containerpb.UsableSubnetwork, error) { - var item *containerpb.UsableSubnetwork - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *UsableSubnetworkIterator) bufLen() int { - return len(it.items) -} - -func (it *UsableSubnetworkIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/container/apiv1/doc.go b/vendor/cloud.google.com/go/container/apiv1/doc.go deleted file mode 100644 index 3161f3b3b4..0000000000 --- a/vendor/cloud.google.com/go/container/apiv1/doc.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -// Package container is an auto-generated package for the -// Kubernetes Engine API. -// -// Builds and manages container-based applications, powered by the open -// source Kubernetes technology. -// -// Use of Context -// -// The ctx passed to NewClient is used for authentication requests and -// for creating the underlying connection, but is not used for subsequent calls. -// Individual methods on the client use the ctx given to them. -// -// To close the open connection, use the Close() method. -// -// For information about setting deadlines, reusing contexts, and more -// please visit pkg.go.dev/cloud.google.com/go. -package container // import "cloud.google.com/go/container/apiv1" - -import ( - "context" - "os" - "runtime" - "strconv" - "strings" - "unicode" - - "google.golang.org/api/option" - "google.golang.org/grpc/metadata" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. 
-type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -const versionClient = "20201110" - -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -func checkDisableDeadlines() (bool, error) { - raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") - if !ok { - return false, nil - } - - b, err := strconv.ParseBool(raw) - return b, err -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. -func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - } -} - -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. -func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go deleted file mode 100644 index 3a03cd2e91..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package monitoring - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newAlertPolicyClientHook clientHook - -// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. 
-type AlertPolicyCallOptions struct { - ListAlertPolicies []gax.CallOption - GetAlertPolicy []gax.CallOption - CreateAlertPolicy []gax.CallOption - DeleteAlertPolicy []gax.CallOption - UpdateAlertPolicy []gax.CallOption -} - -func defaultAlertPolicyClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { - return &AlertPolicyCallOptions{ - ListAlertPolicies: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetAlertPolicy: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateAlertPolicy: []gax.CallOption{}, - DeleteAlertPolicy: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - UpdateAlertPolicy: []gax.CallOption{}, - } -} - -// AlertPolicyClient is a client for interacting with Cloud Monitoring API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type AlertPolicyClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // The gRPC API client. - alertPolicyClient monitoringpb.AlertPolicyServiceClient - - // The call options for this service. - CallOptions *AlertPolicyCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewAlertPolicyClient creates a new alert policy service client. -// -// The AlertPolicyService API is used to manage (list, create, delete, -// edit) alert policies in Stackdriver Monitoring. An alerting policy is -// a description of the conditions under which some aspect of your -// system is considered to be “unhealthy” and the ways to notify -// people or services about this state. In addition to using this API, alert -// policies can also be managed through -// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/), -// which can be reached by clicking the “Monitoring” tab in -// Cloud Console (at https://console.cloud.google.com/). -func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { - clientOpts := defaultAlertPolicyClientOptions() - - if newAlertPolicyClientHook != nil { - hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
- if err != nil { - return nil, err - } - c := &AlertPolicyClient{ - connPool: connPool, - CallOptions: defaultAlertPolicyCallOptions(), - - alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *AlertPolicyClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *AlertPolicyClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// ListAlertPolicies lists the existing alerting policies for the project. -func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...) - it := &AlertPolicyIterator{} - req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { - var resp *monitoringpb.ListAlertPoliciesResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.AlertPolicies, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetAlertPolicy gets a single alerting policy. -func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...) - var resp *monitoringpb.AlertPolicy - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -// CreateAlertPolicy creates a new alerting policy. -func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...) - var resp *monitoringpb.AlertPolicy - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteAlertPolicy deletes an alerting policy. -func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with -// a new one or replace only certain fields in the current alerting policy by -// specifying the fields to be updated via updateMask. Returns the -// updated alerting policy. -func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...) - var resp *monitoringpb.AlertPolicy - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. -type AlertPolicyIterator struct { - items []*monitoringpb.AlertPolicy - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. 
- InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { - var item *monitoringpb.AlertPolicy - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *AlertPolicyIterator) bufLen() int { - return len(it.items) -} - -func (it *AlertPolicyIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go deleted file mode 100644 index 52fa0ee0db..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -// Package monitoring is an auto-generated package for the -// Cloud Monitoring API. -// -// Manages your Cloud Monitoring data and configurations. Most projects must -// be associated with a Workspace, with a few exceptions as noted on the -// individual method pages. The table entries below are presented in -// alphabetical order, not in order of common use. For explanations of the -// concepts found in the table entries, read the [Cloud Monitoring -// documentation](https://cloud.google.com/monitoring/docs). -// -// Use of Context -// -// The ctx passed to NewClient is used for authentication requests and -// for creating the underlying connection, but is not used for subsequent calls. -// Individual methods on the client use the ctx given to them. -// -// To close the open connection, use the Close() method. -// -// For information about setting deadlines, reusing contexts, and more -// please visit godoc.org/cloud.google.com/go. -// -// Deprecated: Please use cloud.google.com/go/monitoring/apiv3/v2. -package monitoring // import "cloud.google.com/go/monitoring/apiv3" - -import ( - "context" - "runtime" - "strings" - "unicode" - - "google.golang.org/api/option" - "google.golang.org/grpc/metadata" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. 
-type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -const versionClient = "20200416" - -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. -func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write", - } -} - -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. -func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go deleted file mode 100644 index 9b02f06163..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package monitoring - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newGroupClientHook clientHook - -// GroupCallOptions contains the retry settings for each method of GroupClient. 
-type GroupCallOptions struct { - ListGroups []gax.CallOption - GetGroup []gax.CallOption - CreateGroup []gax.CallOption - UpdateGroup []gax.CallOption - DeleteGroup []gax.CallOption - ListGroupMembers []gax.CallOption -} - -func defaultGroupClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultGroupCallOptions() *GroupCallOptions { - return &GroupCallOptions{ - ListGroups: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetGroup: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateGroup: []gax.CallOption{}, - UpdateGroup: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - DeleteGroup: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListGroupMembers: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - } -} - -// GroupClient is a client for interacting with Cloud Monitoring API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type GroupClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // The gRPC API client. - groupClient monitoringpb.GroupServiceClient - - // The call options for this service. - CallOptions *GroupCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewGroupClient creates a new group service client. -// -// The Group API lets you inspect and manage your -// groups (at #google.monitoring.v3.Group). -// -// A group is a named filter that is used to identify -// a collection of monitored resources. Groups are typically used to -// mirror the physical and/or logical topology of the environment. -// Because group membership is computed dynamically, monitored -// resources that are started in the future are automatically placed -// in matching groups. By using a group to name monitored resources in, -// for example, an alert policy, the target of that alert policy is -// updated automatically as monitored resources are added and removed -// from the infrastructure. 
-func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { - clientOpts := defaultGroupClientOptions() - - if newGroupClientHook != nil { - hookOpts, err := newGroupClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &GroupClient{ - connPool: connPool, - CallOptions: defaultGroupCallOptions(), - - groupClient: monitoringpb.NewGroupServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *GroupClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *GroupClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *GroupClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// ListGroups lists the existing groups. -func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) - it := &GroupIterator{} - req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { - var resp *monitoringpb.ListGroupsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.Group, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetGroup gets a single group. -func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) 
- var resp *monitoringpb.Group - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CreateGroup creates a new group. -func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) - var resp *monitoringpb.Group - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// UpdateGroup updates an existing group. -// You can change any group attributes except name. -func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) - var resp *monitoringpb.Group - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteGroup deletes an existing group. -func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// ListGroupMembers lists the monitored resources that are members of a group. -func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) 
- it := &MonitoredResourceIterator{} - req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { - var resp *monitoringpb.ListGroupMembersResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.Members, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GroupIterator manages a stream of *monitoringpb.Group. -type GroupIterator struct { - items []*monitoringpb.Group - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *GroupIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *GroupIterator) Next() (*monitoringpb.Group, error) { - var item *monitoringpb.Group - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *GroupIterator) bufLen() int { - return len(it.items) -} - -func (it *GroupIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. -type MonitoredResourceIterator struct { - items []*monitoredrespb.MonitoredResource - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. 
- InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { - var item *monitoredrespb.MonitoredResource - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *MonitoredResourceIterator) bufLen() int { - return len(it.items) -} - -func (it *MonitoredResourceIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go deleted file mode 100644 index d9ea4bdf35..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package monitoring - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newMetricClientHook clientHook - -// MetricCallOptions contains the retry settings for each method of MetricClient. 
-type MetricCallOptions struct { - ListMonitoredResourceDescriptors []gax.CallOption - GetMonitoredResourceDescriptor []gax.CallOption - ListMetricDescriptors []gax.CallOption - GetMetricDescriptor []gax.CallOption - CreateMetricDescriptor []gax.CallOption - DeleteMetricDescriptor []gax.CallOption - ListTimeSeries []gax.CallOption - CreateTimeSeries []gax.CallOption -} - -func defaultMetricClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultMetricCallOptions() *MetricCallOptions { - return &MetricCallOptions{ - ListMonitoredResourceDescriptors: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetMonitoredResourceDescriptor: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListMetricDescriptors: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetMetricDescriptor: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateMetricDescriptor: []gax.CallOption{}, - DeleteMetricDescriptor: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListTimeSeries: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateTimeSeries: []gax.CallOption{}, - } -} - -// MetricClient is a client for interacting with Cloud Monitoring API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type MetricClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // The gRPC API client. - metricClient monitoringpb.MetricServiceClient - - // The call options for this service. - CallOptions *MetricCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewMetricClient creates a new metric service client. -// -// Manages metric descriptors, monitored resource descriptors, and -// time series data. 
-func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { - clientOpts := defaultMetricClientOptions() - - if newMetricClientHook != nil { - hookOpts, err := newMetricClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &MetricClient{ - connPool: connPool, - CallOptions: defaultMetricCallOptions(), - - metricClient: monitoringpb.NewMetricServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *MetricClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *MetricClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *MetricClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Workspace. -func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) - it := &MonitoredResourceDescriptorIterator{} - req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { - var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.ResourceDescriptors, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Workspace. 
-func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) - var resp *monitoredrespb.MonitoredResourceDescriptor - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Workspace. -func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) - it := &MetricDescriptorIterator{} - req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { - var resp *monitoringpb.ListMetricDescriptorsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.MetricDescriptors, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetMetricDescriptor gets a single metric descriptor. This method does not require a Workspace. -func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) - var resp *metricpb.MetricDescriptor - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -// CreateMetricDescriptor creates a new metric descriptor. -// User-created metric descriptors define -// custom metrics (at https://cloud.google.com/monitoring/custom-metrics). -func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) - var resp *metricpb.MetricDescriptor - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteMetricDescriptor deletes a metric descriptor. Only user-created -// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be -// deleted. -func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// ListTimeSeries lists time series that match a filter. This method does not require a Workspace. -func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) - it := &TimeSeriesIterator{} - req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { - var resp *monitoringpb.ListTimeSeriesResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.TimeSeries, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) 
- return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// CreateTimeSeries creates or adds data to one or more time series. -// The response is empty if all time series in the request were written. -// If any time series could not be written, a corresponding failure message is -// included in the error response. -func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. -type MetricDescriptorIterator struct { - items []*metricpb.MetricDescriptor - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { - var item *metricpb.MetricDescriptor - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *MetricDescriptorIterator) bufLen() int { - return len(it.items) -} - -func (it *MetricDescriptorIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. -type MonitoredResourceDescriptorIterator struct { - items []*monitoredrespb.MonitoredResourceDescriptor - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. 
- // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { - var item *monitoredrespb.MonitoredResourceDescriptor - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *MonitoredResourceDescriptorIterator) bufLen() int { - return len(it.items) -} - -func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. -type TimeSeriesIterator struct { - items []*monitoringpb.TimeSeries - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { - var item *monitoringpb.TimeSeries - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *TimeSeriesIterator) bufLen() int { - return len(it.items) -} - -func (it *TimeSeriesIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go deleted file mode 100644 index 67b48a7aec..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package monitoring - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newNotificationChannelClientHook clientHook - -// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. -type NotificationChannelCallOptions struct { - ListNotificationChannelDescriptors []gax.CallOption - GetNotificationChannelDescriptor []gax.CallOption - ListNotificationChannels []gax.CallOption - GetNotificationChannel []gax.CallOption - CreateNotificationChannel []gax.CallOption - UpdateNotificationChannel []gax.CallOption - DeleteNotificationChannel []gax.CallOption - SendNotificationChannelVerificationCode []gax.CallOption - GetNotificationChannelVerificationCode []gax.CallOption - VerifyNotificationChannel []gax.CallOption -} - -func defaultNotificationChannelClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { - return &NotificationChannelCallOptions{ - ListNotificationChannelDescriptors: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetNotificationChannelDescriptor: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListNotificationChannels: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetNotificationChannel: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateNotificationChannel: []gax.CallOption{}, - UpdateNotificationChannel: []gax.CallOption{}, - DeleteNotificationChannel: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - SendNotificationChannelVerificationCode: []gax.CallOption{}, - GetNotificationChannelVerificationCode: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return 
gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - VerifyNotificationChannel: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - } -} - -// NotificationChannelClient is a client for interacting with Cloud Monitoring API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type NotificationChannelClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // The gRPC API client. - notificationChannelClient monitoringpb.NotificationChannelServiceClient - - // The call options for this service. - CallOptions *NotificationChannelCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewNotificationChannelClient creates a new notification channel service client. -// -// The Notification Channel API provides access to configuration that -// controls how messages related to incidents are sent. -func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { - clientOpts := defaultNotificationChannelClientOptions() - - if newNotificationChannelClientHook != nil { - hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &NotificationChannelClient{ - connPool: connPool, - CallOptions: defaultNotificationChannelCallOptions(), - - notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *NotificationChannelClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *NotificationChannelClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors -// makes it possible for new channel types to be dynamically added. 
-func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...) - it := &NotificationChannelDescriptorIterator{} - req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { - var resp *monitoringpb.ListNotificationChannelDescriptorsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.ChannelDescriptors, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields -// are expected / permitted for a notification channel of the given type. -func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...) - var resp *monitoringpb.NotificationChannelDescriptor - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// ListNotificationChannels lists the notification channels that have been created for the project. -func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...) 
- it := &NotificationChannelIterator{} - req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { - var resp *monitoringpb.ListNotificationChannelsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.NotificationChannels, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetNotificationChannel gets a single notification channel. The channel includes the relevant -// configuration details with which the channel was created. However, the -// response may truncate or omit passwords, API keys, or other private key -// matter and thus the response may not be 100% identical to the information -// that was supplied in the call to the create method. -func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...) - var resp *monitoringpb.NotificationChannel - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CreateNotificationChannel creates a new notification channel, representing a single notification -// endpoint such as an email address, SMS number, or PagerDuty service. -func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...) - var resp *monitoringpb.NotificationChannel - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// UpdateNotificationChannel updates a notification channel. 
Fields not specified in the field mask -// remain unchanged. -func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...) - var resp *monitoringpb.NotificationChannel - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteNotificationChannel deletes a notification channel. -func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code -// can then be supplied in VerifyNotificationChannel to verify the channel. -func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.SendNotificationChannelVerificationCode[0:len(c.CallOptions.SendNotificationChannelVerificationCode):len(c.CallOptions.SendNotificationChannelVerificationCode)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.notificationChannelClient.SendNotificationChannelVerificationCode(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then -// be used in a call to VerifyNotificationChannel() on a different channel -// with an equivalent identity in the same or in a different project. This -// makes it possible to copy a channel between projects without requiring -// manual reverification of the channel. If the channel is not in the -// verified state, this method will fail (in other words, this may only be -// used if the SendNotificationChannelVerificationCode and -// VerifyNotificationChannel paths have already been used to put the given -// channel into the verified state). 
-// -// There is no guarantee that the verification codes returned by this method -// will be of a similar structure or form as the ones that are delivered -// to the channel via SendNotificationChannelVerificationCode; while -// VerifyNotificationChannel() will recognize both the codes delivered via -// SendNotificationChannelVerificationCode() and returned from -// GetNotificationChannelVerificationCode(), it is typically the case that -// the verification codes delivered via -// SendNotificationChannelVerificationCode() will be shorter and also -// have a shorter expiration (e.g. codes such as “G-123456”) whereas -// GetVerificationCode() will typically return a much longer, websafe base -// 64 encoded string that has a longer expiration time. -func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetNotificationChannelVerificationCode[0:len(c.CallOptions.GetNotificationChannelVerificationCode):len(c.CallOptions.GetNotificationChannelVerificationCode)], opts...) - var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.GetNotificationChannelVerificationCode(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code -// delivered to the channel as a result of calling -// SendNotificationChannelVerificationCode. -func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.VerifyNotificationChannel[0:len(c.CallOptions.VerifyNotificationChannel):len(c.CallOptions.VerifyNotificationChannel)], opts...) - var resp *monitoringpb.NotificationChannel - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.notificationChannelClient.VerifyNotificationChannel(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. -type NotificationChannelDescriptorIterator struct { - items []*monitoringpb.NotificationChannelDescriptor - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. 
- // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { - var item *monitoringpb.NotificationChannelDescriptor - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationChannelDescriptorIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. -type NotificationChannelIterator struct { - items []*monitoringpb.NotificationChannel - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { - var item *monitoringpb.NotificationChannel - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationChannelIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationChannelIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go deleted file mode 100644 index b2b514ba52..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monitoring - -// GroupProjectPath returns the path for the project resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s", project) -// instead. -func GroupProjectPath(project string) string { - return "" + - "projects/" + - project + - "" -} - -// GroupGroupPath returns the path for the group resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s/groups/%s", project, group) -// instead. -func GroupGroupPath(project, group string) string { - return "" + - "projects/" + - project + - "/groups/" + - group + - "" -} - -// MetricProjectPath returns the path for the project resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s", project) -// instead. -func MetricProjectPath(project string) string { - return "" + - "projects/" + - project + - "" -} - -// MetricMetricDescriptorPath returns the path for the metric descriptor resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor) -// instead. -func MetricMetricDescriptorPath(project, metricDescriptor string) string { - return "" + - "projects/" + - project + - "/metricDescriptors/" + - metricDescriptor + - "" -} - -// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor) -// instead. -func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { - return "" + - "projects/" + - project + - "/monitoredResourceDescriptors/" + - monitoredResourceDescriptor + - "" -} - -// UptimeCheckProjectPath returns the path for the project resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s", project) -// instead. -func UptimeCheckProjectPath(project string) string { - return "" + - "projects/" + - project + - "" -} - -// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig) -// instead. -func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string { - return "" + - "projects/" + - project + - "/uptimeCheckConfigs/" + - uptimeCheckConfig + - "" -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go deleted file mode 100644 index 202cf4d54c..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go +++ /dev/null @@ -1,517 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package monitoring - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newServiceMonitoringClientHook clientHook - -// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient. -type ServiceMonitoringCallOptions struct { - CreateService []gax.CallOption - GetService []gax.CallOption - ListServices []gax.CallOption - UpdateService []gax.CallOption - DeleteService []gax.CallOption - CreateServiceLevelObjective []gax.CallOption - GetServiceLevelObjective []gax.CallOption - ListServiceLevelObjectives []gax.CallOption - UpdateServiceLevelObjective []gax.CallOption - DeleteServiceLevelObjective []gax.CallOption -} - -func defaultServiceMonitoringClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions { - return &ServiceMonitoringCallOptions{ - CreateService: []gax.CallOption{}, - GetService: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListServices: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - UpdateService: []gax.CallOption{}, - DeleteService: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateServiceLevelObjective: []gax.CallOption{}, - GetServiceLevelObjective: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListServiceLevelObjectives: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - UpdateServiceLevelObjective: []gax.CallOption{}, - DeleteServiceLevelObjective: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, 
- }) - }), - }, - } -} - -// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type ServiceMonitoringClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // The gRPC API client. - serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient - - // The call options for this service. - CallOptions *ServiceMonitoringCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewServiceMonitoringClient creates a new service monitoring service client. -// -// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for -// managing and querying aspects of a workspace’s services. These include the -// Service's monitored resources, its Service-Level Objectives, and a taxonomy -// of categorized Health Metrics. -func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) { - clientOpts := defaultServiceMonitoringClientOptions() - - if newServiceMonitoringClientHook != nil { - hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &ServiceMonitoringClient{ - connPool: connPool, - CallOptions: defaultServiceMonitoringCallOptions(), - - serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *ServiceMonitoringClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// CreateService create a Service. -func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateService[0:len(c.CallOptions.CreateService):len(c.CallOptions.CreateService)], opts...) - var resp *monitoringpb.Service - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.CreateService(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetService get the named Service. 
-func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetService[0:len(c.CallOptions.GetService):len(c.CallOptions.GetService)], opts...) - var resp *monitoringpb.Service - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.GetService(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// ListServices list Services for this workspace. -func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListServices[0:len(c.CallOptions.ListServices):len(c.CallOptions.ListServices)], opts...) - it := &ServiceIterator{} - req = proto.Clone(req).(*monitoringpb.ListServicesRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) { - var resp *monitoringpb.ListServicesResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.ListServices(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.Services, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// UpdateService update this Service. -func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateService[0:len(c.CallOptions.UpdateService):len(c.CallOptions.UpdateService)], opts...) - var resp *monitoringpb.Service - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.UpdateService(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteService soft delete this Service. 
-func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteService[0:len(c.CallOptions.DeleteService):len(c.CallOptions.DeleteService)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.serviceMonitoringClient.DeleteService(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service. -func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateServiceLevelObjective[0:len(c.CallOptions.CreateServiceLevelObjective):len(c.CallOptions.CreateServiceLevelObjective)], opts...) - var resp *monitoringpb.ServiceLevelObjective - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.CreateServiceLevelObjective(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetServiceLevelObjective get a ServiceLevelObjective by name. -func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetServiceLevelObjective[0:len(c.CallOptions.GetServiceLevelObjective):len(c.CallOptions.GetServiceLevelObjective)], opts...) - var resp *monitoringpb.ServiceLevelObjective - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.GetServiceLevelObjective(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service. -func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListServiceLevelObjectives[0:len(c.CallOptions.ListServiceLevelObjectives):len(c.CallOptions.ListServiceLevelObjectives)], opts...) 
- it := &ServiceLevelObjectiveIterator{} - req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) { - var resp *monitoringpb.ListServiceLevelObjectivesResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.ListServiceLevelObjectives(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.ServiceLevelObjectives, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// UpdateServiceLevelObjective update the given ServiceLevelObjective. -func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateServiceLevelObjective[0:len(c.CallOptions.UpdateServiceLevelObjective):len(c.CallOptions.UpdateServiceLevelObjective)], opts...) - var resp *monitoringpb.ServiceLevelObjective - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.serviceMonitoringClient.UpdateServiceLevelObjective(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteServiceLevelObjective delete the given ServiceLevelObjective. -func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteServiceLevelObjective[0:len(c.CallOptions.DeleteServiceLevelObjective):len(c.CallOptions.DeleteServiceLevelObjective)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.serviceMonitoringClient.DeleteServiceLevelObjective(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// ServiceIterator manages a stream of *monitoringpb.Service. -type ServiceIterator struct { - items []*monitoringpb.Service - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. 
- // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ServiceIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *ServiceIterator) Next() (*monitoringpb.Service, error) { - var item *monitoringpb.Service - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ServiceIterator) bufLen() int { - return len(it.items) -} - -func (it *ServiceIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective. -type ServiceLevelObjectiveIterator struct { - items []*monitoringpb.ServiceLevelObjective - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) { - var item *monitoringpb.ServiceLevelObjective - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ServiceLevelObjectiveIterator) bufLen() int { - return len(it.items) -} - -func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go deleted file mode 100644 index d07b202c6d..0000000000 --- a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package monitoring - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - "github.com/golang/protobuf/proto" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newUptimeCheckClientHook clientHook - -// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. -type UptimeCheckCallOptions struct { - ListUptimeCheckConfigs []gax.CallOption - GetUptimeCheckConfig []gax.CallOption - CreateUptimeCheckConfig []gax.CallOption - UpdateUptimeCheckConfig []gax.CallOption - DeleteUptimeCheckConfig []gax.CallOption - ListUptimeCheckIps []gax.CallOption -} - -func defaultUptimeCheckClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { - return &UptimeCheckCallOptions{ - ListUptimeCheckConfigs: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - GetUptimeCheckConfig: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - CreateUptimeCheckConfig: []gax.CallOption{}, - UpdateUptimeCheckConfig: []gax.CallOption{}, - DeleteUptimeCheckConfig: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - ListUptimeCheckIps: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 30000 * time.Millisecond, - Multiplier: 1.30, - }) - }), - }, - } -} - -// UptimeCheckClient is a client for interacting with Cloud Monitoring API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type UptimeCheckClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // The gRPC API client. - uptimeCheckClient monitoringpb.UptimeCheckServiceClient - - // The call options for this service. 
- CallOptions *UptimeCheckCallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewUptimeCheckClient creates a new uptime check service client. -// -// The UptimeCheckService API is used to manage (list, create, delete, edit) -// Uptime check configurations in the Stackdriver Monitoring product. An Uptime -// check is a piece of configuration that determines which resources and -// services to monitor for availability. These configurations can also be -// configured interactively by navigating to the [Cloud Console] -// (http://console.cloud.google.com (at http://console.cloud.google.com)), selecting the appropriate project, -// clicking on “Monitoring” on the left-hand side to navigate to Stackdriver, -// and then clicking on “Uptime”. -func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { - clientOpts := defaultUptimeCheckClientOptions() - - if newUptimeCheckClientHook != nil { - hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &UptimeCheckClient{ - connPool: connPool, - CallOptions: defaultUptimeCheckCallOptions(), - - uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *UptimeCheckClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *UptimeCheckClient) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project -// (leaving out any invalid configurations). -func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...) 
- it := &UptimeCheckConfigIterator{} - req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { - var resp *monitoringpb.ListUptimeCheckConfigsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.UptimeCheckConfigs, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// GetUptimeCheckConfig gets a single Uptime check configuration. -func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...) - var resp *monitoringpb.UptimeCheckConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// CreateUptimeCheckConfig creates a new Uptime check configuration. -func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...) - var resp *monitoringpb.UptimeCheckConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire -// configuration with a new one or replace only certain fields in the current -// configuration by specifying the fields to be updated via updateMask. -// Returns the updated configuration. 
-func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...) - var resp *monitoringpb.UptimeCheckConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail -// if the Uptime check configuration is referenced by an alert policy or -// other dependent configs that would be rendered invalid by the deletion. -func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// ListUptimeCheckIps returns the list of IP addresses that checkers run from -func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...) - it := &UptimeCheckIpIterator{} - req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { - var resp *monitoringpb.ListUptimeCheckIpsResponse - req.PageToken = pageToken - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.UptimeCheckIps, resp.NextPageToken, nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken - return it -} - -// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. 
-type UptimeCheckConfigIterator struct { - items []*monitoringpb.UptimeCheckConfig - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { - var item *monitoringpb.UptimeCheckConfig - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *UptimeCheckConfigIterator) bufLen() int { - return len(it.items) -} - -func (it *UptimeCheckConfigIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. -type UptimeCheckIpIterator struct { - items []*monitoringpb.UptimeCheckIp - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. 
-func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { - var item *monitoringpb.UptimeCheckIp - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *UptimeCheckIpIterator) bufLen() int { - return len(it.items) -} - -func (it *UptimeCheckIpIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go deleted file mode 100644 index d0c37e0631..0000000000 --- a/vendor/cloud.google.com/go/trace/apiv2/doc.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -// Package trace is an auto-generated package for the -// Stackdriver Trace API. -// -// Sends application trace data to Stackdriver Trace for viewing. Trace data -// is collected for all App Engine applications by default. Trace data from -// other applications can be provided using this API. This library is used to -// interact with the Trace API directly. If you are looking to instrument -// your application for Stackdriver Trace, we recommend using OpenCensus. -// -// Use of Context -// -// The ctx passed to NewClient is used for authentication requests and -// for creating the underlying connection, but is not used for subsequent calls. -// Individual methods on the client use the ctx given to them. -// -// To close the open connection, use the Close() method. -// -// For information about setting deadlines, reusing contexts, and more -// please visit pkg.go.dev/cloud.google.com/go. -package trace // import "cloud.google.com/go/trace/apiv2" - -import ( - "context" - "os" - "runtime" - "strconv" - "strings" - "unicode" - - "google.golang.org/api/option" - "google.golang.org/grpc/metadata" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. -type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -const versionClient = "20201110" - -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -func checkDisableDeadlines() (bool, error) { - raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") - if !ok { - return false, nil - } - - b, err := strconv.ParseBool(raw) - return b, err -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
-func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/trace.append", - } -} - -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. -func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go deleted file mode 100644 index 80b8d40b58..0000000000 --- a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -// ProjectPath returns the path for the project resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s", project) -// instead. -func ProjectPath(project string) string { - return "" + - "projects/" + - project + - "" -} - -// SpanPath returns the path for the span resource. -// -// Deprecated: Use -// fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span) -// instead. -func SpanPath(project, trace, span string) string { - return "" + - "projects/" + - project + - "/traces/" + - trace + - "/spans/" + - span + - "" -} diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go deleted file mode 100644 index 848a8ff2cd..0000000000 --- a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
- -package trace - -import ( - "context" - "fmt" - "math" - "net/url" - "time" - - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - gtransport "google.golang.org/api/transport/grpc" - cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -var newClientHook clientHook - -// CallOptions contains the retry settings for each method of Client. -type CallOptions struct { - BatchWriteSpans []gax.CallOption - CreateSpan []gax.CallOption -} - -func defaultClientOptions() []option.ClientOption { - return []option.ClientOption{ - internaloption.WithDefaultEndpoint("cloudtrace.googleapis.com:443"), - internaloption.WithDefaultMTLSEndpoint("cloudtrace.mtls.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultCallOptions() *CallOptions { - return &CallOptions{ - BatchWriteSpans: []gax.CallOption{}, - CreateSpan: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.Unavailable, - codes.DeadlineExceeded, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 1000 * time.Millisecond, - Multiplier: 1.20, - }) - }), - }, - } -} - -// Client is a client for interacting with Stackdriver Trace API. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type Client struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - - // The gRPC API client. - client cloudtracepb.TraceServiceClient - - // The call options for this service. - CallOptions *CallOptions - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewClient creates a new trace service client. -// -// This file describes an API for collecting and viewing traces and spans -// within a trace. A Trace is a collection of spans corresponding to a single -// operation or set of operations for an application. A span is an individual -// timed event which forms a node of the trace tree. A single trace may -// contain span(s) from multiple services. -func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - clientOpts := defaultClientOptions() - - if newClientHook != nil { - hookOpts, err := newClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - c := &Client{ - connPool: connPool, - disableDeadlines: disableDeadlines, - CallOptions: defaultCallOptions(), - - client: cloudtracepb.NewTraceServiceClient(connPool), - } - c.setGoogleClientInfo() - - return c, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated. -func (c *Client) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. 
-func (c *Client) Close() error { - return c.connPool.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *Client) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// BatchWriteSpans sends new spans to new or existing traces. You cannot update -// existing spans. -func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -// CreateSpan creates a new span. -func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond) - defer cancel() - ctx = cctx - } - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...) - var resp *cloudtracepb.Span - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml new file mode 100644 index 0000000000..0aa9844f42 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.golangci.yml @@ -0,0 +1,123 @@ +# options for analysis running +run: + # default concurrency is a available CPU number + concurrency: 4 + + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 10m + + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + + # include test files or not, default is true + tests: true + + # which dirs to skip: issues from them won't be reported; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but default dirs are skipped independently + # from this option's value (see skip-dirs-use-default). + skip-dirs: + + # default is true. Enables skipping of directories: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs-use-default: false + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. 
Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + skip-files: + + # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. + modules-download-mode: readonly + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true + +# all available settings of specific linters +linters-settings: + govet: + # report about shadowed variables + check-shadowing: true + + # settings per analyzer + settings: + printf: # analyzer name, run `go tool vet help` to see all analyzers + funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + + enable-all: true + # TODO: Enable this and fix the alignment issues. + disable: + - fieldalignment + + golint: + # minimal confidence for issues, default is 0.8 + min-confidence: 0.8 + + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: contrib.go.opencensus.io/exporter/prometheus + + misspell: + # Correct spellings using locale preferences for US or UK. + # Default is to use a neutral variety of English. + # Setting locale to US will correct the British spelling of 'colour' to 'color'. + locale: US + ignore-words: + - cancelled + - metre + - meter + - metres + - kilometre + - kilometres + +linters: + disable: + - errcheck + enable: + - gofmt + - goimports + - golint + - gosec + - govet + - staticcheck + - misspell + - scopelint + - unconvert + - gocritic + - unparam + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + # Exclude some linters from running on tests files. 
+ - path: _test\.go + linters: + - scopelint + - text: "G404:" + linters: + - gosec diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml index 957a893db2..17afafec2b 100644 --- a/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml @@ -3,7 +3,7 @@ language: go go_import_path: contrib.go.opencensus.io go: - - 1.11.x + - 1.15.x env: global: diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile index 2e11d225c2..cf4d613281 100644 --- a/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile @@ -8,22 +8,18 @@ ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) GOTEST_OPT?=-v -race -timeout 30s GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic GOTEST=go test -GOFMT=gofmt -GOLINT=golint -GOVET=go vet -EMBEDMD=embedmd +LINT=golangci-lint # TODO decide if we need to change these names. README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') +.DEFAULT_GOAL := lint-test -.DEFAULT_GOAL := fmt-lint-vet-embedmd-test +.PHONY: lint-test +lint-test: lint test -.PHONY: fmt-lint-vet-embedmd-test -fmt-lint-vet-embedmd-test: fmt lint vet embedmd test - -# TODO enable test-with-coverage in tavis +# TODO enable test-with-coverage in travis .PHONY: travis-ci -travis-ci: fmt lint vet embedmd test test-386 +travis-ci: lint test test-386 all-pkgs: @echo $(ALL_PKGS) | tr ' ' '\n' | sort @@ -43,53 +39,12 @@ test-386: test-with-coverage: $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ - exit 1; \ - else \ - echo "Fmt finished successfully"; \ - fi - .PHONY: lint lint: - @LINTOUT=`$(GOLINT) $(ALL_PKGS) 2>&1`; \ - if [ "$$LINTOUT" ]; then \ - echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ - echo "$$LINTOUT\n"; \ - exit 1; \ - else \ - echo "Lint finished successfully"; \ - fi - -.PHONY: vet -vet: - # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" - @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ - if [ "$$VETOUT" ]; then \ - echo "$(GOVET) FAILED => go vet the following files:\n"; \ - echo "$$VETOUT\n"; \ - exit 1; \ - else \ - echo "Vet finished successfully"; \ - fi - -.PHONY: embedmd -embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ - if [ "$$EMBEDMDOUT" ]; then \ - echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ - echo "$$EMBEDMDOUT\n"; \ - exit 1; \ - else \ - echo "Embedmd finished successfully"; \ - fi + $(LINT) run --allow-parallel-runners .PHONY: install-tools install-tools: - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/lint/golint - go get -u github.com/rakyll/embedmd + cd internal/tools && go install golang.org/x/tools/cmd/cover + cd internal/tools && go install github.com/golangci/golangci-lint/cmd/golangci-lint + diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod index 1ed6efc9f7..b34120b774 100644 --- a/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod @@ -1,14 +1,10 @@ module contrib.go.opencensus.io/exporter/prometheus require ( - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect - github.com/google/go-cmp v0.3.1 - github.com/prometheus/client_golang v1.2.1 - github.com/prometheus/procfs v0.0.6 // indirect - github.com/prometheus/statsd_exporter v0.15.0 - go.opencensus.io v0.22.4-0.20200608061201-1901b56b9515 - golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 // indirect + github.com/google/go-cmp v0.5.5 + github.com/prometheus/client_golang v1.9.0 + github.com/prometheus/statsd_exporter v0.20.0 + go.opencensus.io v0.23.0 ) go 1.13 diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum index b65c1cfc0f..8710184f1b 100644 --- a/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum @@ -1,149 +1,448 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid 
v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common 
v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.6 h1:0qbH+Yqu/cj1ViVLvEWCP6qMQ4efWUj6bQqOEA0V0U4= -github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/statsd_exporter v0.15.0 h1:UiwC1L5HkxEPeapXdm2Ye0u1vUJfTj7uwT5yydYpa1E= -github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7WL1zHKK7WMqFQqu72rw= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/statsd_exporter v0.20.0 h1:M0hQphnq2WyWKS5CefQL8PqWwBOBPhiAkyLo5l4ZYvE= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/streadway/amqp 
v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4-0.20200608061201-1901b56b9515 h1:YS3N5IEX0gd5pYMAT6Am+LQPPuQTNRv11JaZY0+BV0Y= -go.opencensus.io v0.22.4-0.20200608061201-1901b56b9515/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= 
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go index 7cf883f53b..b94c6d3991 100644 --- a/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go @@ -17,13 +17,12 @@ package prometheus // import "contrib.go.opencensus.io/exporter/prometheus" import ( + "context" "fmt" "log" "net/http" "sync" - "context" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "go.opencensus.io/metric/metricdata" @@ -105,7 +104,8 @@ func (o *Options) onError(err error) { // corresponding Prometheus Metric: SumData will be converted // to Untyped Metric, CountData will be a Counter Metric, // DistributionData will be a Histogram Metric. -// Deprecated in lieu of metricexport.Reader interface. +// +// Deprecated: in lieu of metricexport.Reader interface. func (e *Exporter) ExportView(vd *view.Data) { } @@ -117,7 +117,6 @@ func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { // collector implements prometheus.Collector type collector struct { opts Options - mu sync.Mutex // mu guards all the fields. 
registerOnce sync.Once @@ -151,11 +150,12 @@ func newCollector(opts Options, registrar prometheus.Registerer) *collector { func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { var labels prometheus.Labels - if metric.Resource == nil { + switch { + case metric.Resource == nil: labels = c.opts.ConstLabels - } else if c.opts.ConstLabels == nil { + case c.opts.ConstLabels == nil: labels = metric.Resource.Labels - } else { + default: labels = prometheus.Labels{} for k, v := range c.opts.ConstLabels { labels[k] = v @@ -171,7 +171,6 @@ func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { metric.Descriptor.Description, toPromLabels(metric.Descriptor.LabelKeys), labels) - } type metricExporter struct { diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/.gitignore b/vendor/contrib.go.opencensus.io/exporter/stackdriver/.gitignore deleted file mode 100644 index c5a54149f0..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# GoLand IDEA -/.idea/ -*.iml - -# VS Code -.vscode - -# Coverage -coverage.txt -coverage.html diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/stackdriver/.travis.yml deleted file mode 100644 index 7da9441128..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -go: - - 1.12.x - -go_import_path: contrib.go.opencensus.io/exporter/stackdriver - -env: - global: - GO111MODULE=on - -install: - - go mod download - - make install-tools - -script: - - make travis-ci - -after_success: - - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS deleted file mode 100644 index e491a9e7f7..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/stackdriver/CONTRIBUTING.md deleted file mode 100644 index 0786fdf434..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/CONTRIBUTING.md +++ /dev/null @@ -1,24 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. 
- -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/Makefile b/vendor/contrib.go.opencensus.io/exporter/stackdriver/Makefile deleted file mode 100644 index cc0818596c..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/Makefile +++ /dev/null @@ -1,125 +0,0 @@ -# TODO: Fix this on windows. -ALL_SRC := $(shell find . -name '*.go' \ - -not -path '*/internal/testpb/*' \ - -not -name 'tools.go' \ - -type f | sort) -ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) - -GOTEST_OPT?=-v -race -timeout 30s -GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic -GOTEST=go test -GOFMT=gofmt -GOLINT=golint -GOIMPORTS=goimports -GOVET=go vet -EMBEDMD=embedmd -STATICCHECK=staticcheck -# TODO decide if we need to change these names. -README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') - -.DEFAULT_GOAL := defaul-goal - -.PHONY: defaul-goal -defaul-goal: fmt lint vet embedmd goimports staticcheck test - -# TODO: enable test-with-cover when find out why "scripts/check-test-files.sh: 4: set: Illegal option -o pipefail" -.PHONY: travis-ci -travis-ci: fmt lint vet embedmd goimports staticcheck test test-386 test-with-coverage - -all-pkgs: - @echo $(ALL_PKGS) | tr ' ' '\n' | sort - -all-srcs: - @echo $(ALL_SRC) | tr ' ' '\n' | sort - -.PHONY: test -test: - $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) - -.PHONY: test-386 -test-386: - GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) - -.PHONY: test-with-coverage -test-with-coverage: - @echo pre-compiling tests - @time go test -i $(ALL_PKGS) - $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) - go tool cover -html=coverage.txt -o coverage.html - -.PHONY: test-with-cover -test-with-cover: - @echo Verifying that all packages have test files to count in coverage - @scripts/check-test-files.sh $(subst contrib.go.opencensus.io/exporter/stackdriver,./,$(ALL_PKGS)) - @echo pre-compiling tests - @time go test -i $(ALL_PKGS) - $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) - go tool cover -html=coverage.txt -o coverage.html - -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ - exit 1; \ - else \ - echo "Fmt finished successfully"; \ - fi - -.PHONY: lint -lint: - @LINTOUT=`$(GOLINT) $(ALL_PKGS) 2>&1`; \ - if [ "$$LINTOUT" ]; then \ - echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ - echo "$$LINTOUT\n"; \ - exit 1; \ - else \ - echo "Lint finished successfully"; \ - fi - -.PHONY: vet -vet: - # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" - @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ - if [ "$$VETOUT" ]; then \ - echo "$(GOVET) FAILED => go vet the following files:\n"; \ - echo "$$VETOUT\n"; \ - exit 1; \ - else \ - echo "Vet finished successfully"; \ - fi - -.PHONY: embedmd -embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ - if [ "$$EMBEDMDOUT" ]; then \ - echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ - echo "$$EMBEDMDOUT\n"; \ - exit 1; \ - else \ - echo "Embedmd finished successfully"; \ - fi - -.PHONY: goimports -goimports: - @IMPORTSOUT=`$(GOIMPORTS) -d . 2>&1`; \ - if [ "$$IMPORTSOUT" ]; then \ - echo "$(GOIMPORTS) FAILED => fix the following goimports errors:\n"; \ - echo "$$IMPORTSOUT\n"; \ - exit 1; \ - else \ - echo "Goimports finished successfully"; \ - fi - -.PHONY: staticcheck -staticcheck: - $(STATICCHECK) ./... - -.PHONY: install-tools -install-tools: - GO111MODULE=on go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - github.com/rakyll/embedmd \ - honnef.co/go/tools/cmd/staticcheck diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/README.md b/vendor/contrib.go.opencensus.io/exporter/stackdriver/README.md deleted file mode 100644 index 24a6e11f90..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# OpenCensus Go Stackdriver - -[![Build Status](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-stackdriver.svg?branch=master)](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-stackdriver) [![GoDoc][godoc-image]][godoc-url] - -Provides OpenCensus exporter support for Stackdriver Monitoring and Stackdriver Trace. - -## Installation - -``` -$ go get -u contrib.go.opencensus.io/exporter/stackdriver -``` - -[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver?status.svg -[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/RESOURCE.md b/vendor/contrib.go.opencensus.io/exporter/stackdriver/RESOURCE.md deleted file mode 100644 index 9154b99ed5..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/RESOURCE.md +++ /dev/null @@ -1,53 +0,0 @@ -# RESOURCES - -Stackdriver has defined [resource types](https://cloud.google.com/monitoring/api/resources) for monitoring and for each resource type there -are mandatory resource labels. OpenCensus has defined [standard resource](https://github.com/census-instrumentation/opencensus-specs/blob/master/resource/StandardResources.md) -types and labels. -This document describes the translation from OpenCensus resources to Stackdriver resources -performed by this exporter. 
- -## Mapping between Stackdriver and OpenCensus Resources - -### k8s_container - -**condition:** resource.type == container - -*k8s_container takes a precedence, so GKE will be mapped to k8s_container and its -associated resource labels but it will not contain any gcp_instance specific -resource labels.* - - -| Item | OpenCensus | Stackdriver | -|---------------------|--------------------|----------------| -| **resource type** | container | k8s_container | -| **resource labels** | | | -| | cloud.zone | location | -| | k8s.cluster.name | cluster_name | -| | k8s.namespace.name | namespace_name | -| | k8s.pod.name | pod_name | -| | container.name | container_name | - - -### gcp_instance -**condition:** cloud.provider == gcp - -| Item | OpenCensus | Stackdriver | -|---------------------|--------------------|----------------| -| **resource type** | cloud | gcp_instance | -| **resource labels** | | | -| | host.id | instance_id | -| | cloud.zone | zone | - - -### aws_ec2_instance -**condition:** cloud.provider == aws - -| Item | OpenCensus | Stackdriver | -|---------------------|--------------------|------------------| -| **resource type** | | | -| | cloud | aws_ec2_instance | -| **resource labels** | | | -| | host.id | instance_id | -| | cloud.region | region | -| | cloud.account.id | aws_account | - diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.mod b/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.mod deleted file mode 100644 index bdcd72adb7..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.mod +++ /dev/null @@ -1,23 +0,0 @@ -module contrib.go.opencensus.io/exporter/stackdriver - -go 1.12 - -require ( - cloud.google.com/go v0.45.1 - github.com/aws/aws-sdk-go v1.23.20 - github.com/census-instrumentation/opencensus-proto v0.2.1 - github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.3.1 - github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 - go.opencensus.io v0.22.4 - golang.org/x/lint v0.0.0-20190409202823-959b441ac422 - golang.org/x/net v0.0.0-20190923162816-aa69164e4478 - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect - golang.org/x/tools v0.0.0-20191010075000-0337d82405ff - google.golang.org/api v0.10.0 - google.golang.org/appengine v1.6.2 // indirect - google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 - google.golang.org/grpc v1.23.1 - honnef.co/go/tools v0.0.1-2019.2.3 -) diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.sum b/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.sum deleted file mode 100644 index e219b19073..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.sum +++ /dev/null @@ -1,203 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/aws/aws-sdk-go v1.23.20 h1:2CBuL21P0yKdZN5urf2NxKa1ha8fhnY+A3pBCHFeZoA= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
-github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= -golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools 
v0.0.0-20191010075000-0337d82405ff h1:XdBG6es/oFDr1HwaxkxgVve7NB281QhxgK/i4voubFs= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.1 
h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go deleted file mode 100644 index 88835cc0fc..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -// Labels represents a set of Stackdriver Monitoring labels. -type Labels struct { - m map[string]labelValue -} - -type labelValue struct { - val, desc string -} - -// Set stores a label with the given key, value and description, -// overwriting any previous values with the given key. -func (labels *Labels) Set(key, value, description string) { - if labels.m == nil { - labels.m = make(map[string]labelValue) - } - labels.m[key] = labelValue{value, description} -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go deleted file mode 100644 index 8a185b9738..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -/* -The code in this file is responsible for converting OpenCensus Proto metrics -directly to Stackdriver Metrics. -*/ - -import ( - "context" - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/timestamp" - "go.opencensus.io/trace" - - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - labelpb "google.golang.org/genproto/googleapis/api/label" - googlemetricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/resource" -) - -const ( - exemplarAttachmentTypeString = "type.googleapis.com/google.protobuf.StringValue" - exemplarAttachmentTypeSpanCtx = "type.googleapis.com/google.monitoring.v3.SpanContext" - - // TODO(songy23): add support for this. - // exemplarAttachmentTypeDroppedLabels = "type.googleapis.com/google.monitoring.v3.DroppedLabels" -) - -// ExportMetrics exports OpenCensus Metrics to Stackdriver Monitoring. -func (se *statsExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { - if len(metrics) == 0 { - return nil - } - - for _, metric := range metrics { - se.metricsBundler.Add(metric, 1) - // TODO: [rghetia] handle errors. - } - - return nil -} - -func (se *statsExporter) handleMetricsUpload(metrics []*metricdata.Metric) { - err := se.uploadMetrics(metrics) - if err != nil { - se.o.handleError(err) - } -} - -func (se *statsExporter) uploadMetrics(metrics []*metricdata.Metric) error { - ctx, cancel := newContextWithTimeout(se.o.Context, se.o.Timeout) - defer cancel() - - var errors []error - - ctx, span := trace.StartSpan( - ctx, - "contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - - for _, metric := range metrics { - // Now create the metric descriptor remotely. - if err := se.createMetricDescriptorFromMetric(ctx, metric); err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - errors = append(errors, err) - continue - } - } - - var allTimeSeries []*monitoringpb.TimeSeries - for _, metric := range metrics { - tsl, err := se.metricToMpbTs(ctx, metric) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - errors = append(errors, err) - continue - } - if tsl != nil { - allTimeSeries = append(allTimeSeries, tsl...) - } - } - - // Now batch timeseries up and then export. 
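The uploadMetrics loop that follows slices the accumulated time series into windows of maxTimeSeriesPerUpload before turning each window into CreateTimeSeries requests. The same start/end bookkeeping in isolation, with ints standing in for *monitoringpb.TimeSeries values and a made-up batch size:

```go
package main

import "fmt"

// batch walks items in fixed-size windows, exactly like the start/end loop in
// uploadMetrics; batchSize stands in for maxTimeSeriesPerUpload (illustrative).
func batch(items []int, batchSize int) [][]int {
	var out [][]int
	for start, end := 0, 0; start < len(items); start = end {
		end = start + batchSize
		if end > len(items) {
			end = len(items)
		}
		out = append(out, items[start:end])
	}
	return out
}

func main() {
	fmt.Println(batch([]int{1, 2, 3, 4, 5, 6, 7}, 3)) // [[1 2 3] [4 5 6] [7]]
}
```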
- for start, end := 0, 0; start < len(allTimeSeries); start = end { - end = start + maxTimeSeriesPerUpload - if end > len(allTimeSeries) { - end = len(allTimeSeries) - } - batch := allTimeSeries[start:end] - ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(batch) - for _, ctsreq := range ctsreql { - if err := createTimeSeries(ctx, se.c, ctsreq); err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - errors = append(errors, err) - } - } - } - - numErrors := len(errors) - if numErrors == 0 { - return nil - } else if numErrors == 1 { - return errors[0] - } - errMsgs := make([]string, 0, numErrors) - for _, err := range errors { - errMsgs = append(errMsgs, err.Error()) - } - return fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) -} - -// metricToMpbTs converts a metric into a list of Stackdriver Monitoring v3 API TimeSeries -// but it doesn't invoke any remote API. -func (se *statsExporter) metricToMpbTs(ctx context.Context, metric *metricdata.Metric) ([]*monitoringpb.TimeSeries, error) { - if metric == nil { - return nil, errNilMetricOrMetricDescriptor - } - - resource := se.metricRscToMpbRsc(metric.Resource) - - metricName := metric.Descriptor.Name - metricType := se.metricTypeFromProto(metricName) - metricLabelKeys := metric.Descriptor.LabelKeys - metricKind, _ := metricDescriptorTypeToMetricKind(metric) - - if metricKind == googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED { - // ignore these Timeserieses. TODO [rghetia] log errors. - return nil, nil - } - - timeSeries := make([]*monitoringpb.TimeSeries, 0, len(metric.TimeSeries)) - for _, ts := range metric.TimeSeries { - sdPoints, err := se.metricTsToMpbPoint(ts, metricKind) - if err != nil { - // TODO(@rghetia): record error metrics - continue - } - - // Each TimeSeries has labelValues which MUST be correlated - // with that from the MetricDescriptor - labels, err := metricLabelsToTsLabels(se.defaultLabels, metricLabelKeys, ts.LabelValues) - if err != nil { - // TODO: (@rghetia) perhaps log this error from labels extraction, if non-nil. - continue - } - - var rsc *monitoredrespb.MonitoredResource - var mr monitoredresource.Interface - if se.o.ResourceByDescriptor != nil { - labels, mr = se.o.ResourceByDescriptor(&metric.Descriptor, labels) - // TODO(rghetia): optimize this. It is inefficient to convert this for all metrics. - rsc = convertMonitoredResourceToPB(mr) - if rsc.Type == "" { - rsc.Type = "global" - rsc.Labels = nil - } - } else { - rsc = resource - } - timeSeries = append(timeSeries, &monitoringpb.TimeSeries{ - Metric: &googlemetricpb.Metric{ - Type: metricType, - Labels: labels, - }, - Resource: rsc, - Points: sdPoints, - }) - } - - return timeSeries, nil -} - -func metricLabelsToTsLabels(defaults map[string]labelValue, labelKeys []metricdata.LabelKey, labelValues []metricdata.LabelValue) (map[string]string, error) { - // Perform this sanity check now. - if len(labelKeys) != len(labelValues) { - return nil, fmt.Errorf("length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues)) - } - - if len(defaults)+len(labelKeys) == 0 { - return nil, nil - } - - labels := make(map[string]string) - // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched. 
- for key, label := range defaults { - labels[sanitize(key)] = label.val - } - - for i, labelKey := range labelKeys { - labelValue := labelValues[i] - if labelValue.Present { - labels[sanitize(labelKey.Key)] = labelValue.Value - } - } - - return labels, nil -} - -// createMetricDescriptorFromMetric creates a metric descriptor from the OpenCensus metric -// and then creates it remotely using Stackdriver's API. -func (se *statsExporter) createMetricDescriptorFromMetric(ctx context.Context, metric *metricdata.Metric) error { - // Skip create metric descriptor if configured - if se.o.SkipCMD { - return nil - } - - se.metricMu.Lock() - defer se.metricMu.Unlock() - - name := metric.Descriptor.Name - if _, created := se.metricDescriptors[name]; created { - return nil - } - - if builtinMetric(se.metricTypeFromProto(name)) { - se.metricDescriptors[name] = true - return nil - } - - // Otherwise, we encountered a cache-miss and - // should create the metric descriptor remotely. - inMD, err := se.metricToMpbMetricDescriptor(metric) - if err != nil { - return err - } - - if err = se.createMetricDescriptor(ctx, inMD); err != nil { - return err - } - - // Now record the metric as having been created. - se.metricDescriptors[name] = true - return nil -} - -func (se *statsExporter) metricToMpbMetricDescriptor(metric *metricdata.Metric) (*googlemetricpb.MetricDescriptor, error) { - if metric == nil { - return nil, errNilMetricOrMetricDescriptor - } - - metricType := se.metricTypeFromProto(metric.Descriptor.Name) - displayName := se.displayName(metric.Descriptor.Name) - metricKind, valueType := metricDescriptorTypeToMetricKind(metric) - - sdm := &googlemetricpb.MetricDescriptor{ - Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType), - DisplayName: displayName, - Description: metric.Descriptor.Description, - Unit: string(metric.Descriptor.Unit), - Type: metricType, - MetricKind: metricKind, - ValueType: valueType, - Labels: metricLableKeysToLabels(se.defaultLabels, metric.Descriptor.LabelKeys), - } - - return sdm, nil -} - -func metricLableKeysToLabels(defaults map[string]labelValue, labelKeys []metricdata.LabelKey) []*labelpb.LabelDescriptor { - labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(labelKeys)) - - // Fill in the defaults first. - for key, lbl := range defaults { - labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ - Key: sanitize(key), - Description: lbl.desc, - ValueType: labelpb.LabelDescriptor_STRING, - }) - } - - // Now fill in those from the metric. 
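createMetricDescriptorFromMetric above guards the remote CreateMetricDescriptor call with a mutex and a map of names already handled, so each descriptor is created at most once per exporter instance. Reduced to its essentials, the pattern looks like the sketch below; descriptorCache and the stubbed create function are illustrative names, not part of the exporter's API.

```go
package main

import (
	"fmt"
	"sync"
)

// descriptorCache performs a create-once lookup: the mutex serialises checks,
// the set records names that have already been created remotely.
type descriptorCache struct {
	mu      sync.Mutex
	created map[string]bool
}

func (c *descriptorCache) ensure(name string, create func(string) error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.created[name] {
		return nil // cache hit: skip the remote call
	}
	if err := create(name); err != nil {
		return err
	}
	c.created[name] = true
	return nil
}

func main() {
	cache := &descriptorCache{created: map[string]bool{}}
	calls := 0
	create := func(name string) error { calls++; return nil }

	_ = cache.ensure("custom.googleapis.com/opencensus/example_metric", create)
	_ = cache.ensure("custom.googleapis.com/opencensus/example_metric", create)
	fmt.Println("remote create calls:", calls) // remote create calls: 1
}
```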
- for _, key := range labelKeys { - labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ - Key: sanitize(key.Key), - Description: key.Description, - ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags - }) - } - return labelDescriptors -} - -func metricDescriptorTypeToMetricKind(m *metricdata.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) { - if m == nil { - return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED - } - - switch m.Descriptor.Type { - case metricdata.TypeCumulativeInt64: - return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64 - - case metricdata.TypeCumulativeFloat64: - return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE - - case metricdata.TypeCumulativeDistribution: - return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION - - case metricdata.TypeGaugeFloat64: - return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE - - case metricdata.TypeGaugeInt64: - return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 - - case metricdata.TypeGaugeDistribution: - return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION - - case metricdata.TypeSummary: - // TODO: [rghetia] after upgrading to proto version3, retrun UNRECOGNIZED instead of UNSPECIFIED - return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED - - default: - // TODO: [rghetia] after upgrading to proto version3, retrun UNRECOGNIZED instead of UNSPECIFIED - return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED - } -} - -func (se *statsExporter) metricRscToMpbRsc(rs *resource.Resource) *monitoredrespb.MonitoredResource { - if rs == nil { - resource := se.o.Resource - if resource == nil { - resource = &monitoredrespb.MonitoredResource{ - Type: "global", - } - } - return resource - } - typ := rs.Type - if typ == "" { - typ = "global" - } - mrsp := &monitoredrespb.MonitoredResource{ - Type: typ, - } - if rs.Labels != nil { - mrsp.Labels = make(map[string]string, len(rs.Labels)) - for k, v := range rs.Labels { - // TODO: [rghetia] add mapping between OC Labels and SD Labels. - mrsp.Labels[k] = v - } - } - return mrsp -} - -func (se *statsExporter) metricTsToMpbPoint(ts *metricdata.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) (sptl []*monitoringpb.Point, err error) { - for _, pt := range ts.Points { - - // If we have a last value aggregation point i.e. MetricDescriptor_GAUGE - // StartTime should be nil. 
- startTime := timestampProto(ts.StartTime) - if metricKind == googlemetricpb.MetricDescriptor_GAUGE { - startTime = nil - } - - spt, err := metricPointToMpbPoint(startTime, &pt, se.o.ProjectID) - if err != nil { - return nil, err - } - sptl = append(sptl, spt) - } - return sptl, nil -} - -func metricPointToMpbPoint(startTime *timestamp.Timestamp, pt *metricdata.Point, projectID string) (*monitoringpb.Point, error) { - if pt == nil { - return nil, nil - } - - mptv, err := metricPointToMpbValue(pt, projectID) - if err != nil { - return nil, err - } - - mpt := &monitoringpb.Point{ - Value: mptv, - Interval: &monitoringpb.TimeInterval{ - StartTime: startTime, - EndTime: timestampProto(pt.Time), - }, - } - return mpt, nil -} - -func metricPointToMpbValue(pt *metricdata.Point, projectID string) (*monitoringpb.TypedValue, error) { - if pt == nil { - return nil, nil - } - - var err error - var tval *monitoringpb.TypedValue - switch v := pt.Value.(type) { - default: - err = fmt.Errorf("protoToMetricPoint: unknown Data type: %T", pt.Value) - - case int64: - tval = &monitoringpb.TypedValue{ - Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: v, - }, - } - - case float64: - tval = &monitoringpb.TypedValue{ - Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v, - }, - } - - case *metricdata.Distribution: - dv := v - var mv *monitoringpb.TypedValue_DistributionValue - var mean float64 - if dv.Count > 0 { - mean = float64(dv.Sum) / float64(dv.Count) - } - mv = &monitoringpb.TypedValue_DistributionValue{ - DistributionValue: &distributionpb.Distribution{ - Count: dv.Count, - Mean: mean, - SumOfSquaredDeviation: dv.SumOfSquaredDeviation, - }, - } - - insertZeroBound := false - if bopts := dv.BucketOptions; bopts != nil { - insertZeroBound = shouldInsertZeroBound(bopts.Bounds...) - mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{ - Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ - ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ - // The first bucket bound should be 0.0 because the Metrics first bucket is - // [0, first_bound) but Stackdriver monitoring bucket bounds begin with -infinity - // (first bucket is (-infinity, 0)) - Bounds: addZeroBoundOnCondition(insertZeroBound, bopts.Bounds...), - }, - }, - } - } - bucketCounts, exemplars := metricBucketToBucketCountsAndExemplars(dv.Buckets, projectID) - mv.DistributionValue.BucketCounts = addZeroBucketCountOnCondition(insertZeroBound, bucketCounts...) 
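The distribution branch just above prepends a 0.0 bucket bound, plus a matching zero bucket count, when needed: OpenCensus explicit buckets begin at [0, first_bound) while Stackdriver's first explicit bucket is (-infinity, first_bound). The helpers below re-create that adjustment for illustration only; the real shouldInsertZeroBound, addZeroBoundOnCondition and addZeroBucketCountOnCondition live elsewhere in this package and may differ in edge cases.

```go
package main

import "fmt"

// shouldInsertZeroBound reports whether a leading 0 bound must be added
// (assumed here: only when the first OpenCensus bound is above zero).
func shouldInsertZeroBound(bounds ...float64) bool {
	return len(bounds) > 0 && bounds[0] > 0
}

// addZeroBoundOnCondition prepends 0 to the bucket bounds when insert is true.
func addZeroBoundOnCondition(insert bool, bounds ...float64) []float64 {
	if insert {
		return append([]float64{0}, bounds...)
	}
	return bounds
}

// addZeroBucketCountOnCondition prepends an empty bucket count to stay aligned
// with the extra bound.
func addZeroBucketCountOnCondition(insert bool, counts ...int64) []int64 {
	if insert {
		return append([]int64{0}, counts...)
	}
	return counts
}

func main() {
	insert := shouldInsertZeroBound(5, 10)
	fmt.Println(addZeroBoundOnCondition(insert, 5, 10))   // [0 5 10]
	fmt.Println(addZeroBucketCountOnCondition(insert, 3, 7)) // [0 3 7]
}
```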
- mv.DistributionValue.Exemplars = exemplars - - tval = &monitoringpb.TypedValue{Value: mv} - } - - return tval, err -} - -func metricBucketToBucketCountsAndExemplars(buckets []metricdata.Bucket, projectID string) ([]int64, []*distributionpb.Distribution_Exemplar) { - bucketCounts := make([]int64, len(buckets)) - var exemplars []*distributionpb.Distribution_Exemplar - for i, bucket := range buckets { - bucketCounts[i] = bucket.Count - if bucket.Exemplar != nil { - exemplars = append(exemplars, metricExemplarToPbExemplar(bucket.Exemplar, projectID)) - } - } - return bucketCounts, exemplars -} - -func metricExemplarToPbExemplar(exemplar *metricdata.Exemplar, projectID string) *distributionpb.Distribution_Exemplar { - return &distributionpb.Distribution_Exemplar{ - Value: exemplar.Value, - Timestamp: timestampProto(exemplar.Timestamp), - Attachments: attachmentsToPbAttachments(exemplar.Attachments, projectID), - } -} - -func attachmentsToPbAttachments(attachments metricdata.Attachments, projectID string) []*any.Any { - var pbAttachments []*any.Any - for _, v := range attachments { - if spanCtx, succ := v.(trace.SpanContext); succ { - pbAttachments = append(pbAttachments, toPbSpanCtxAttachment(spanCtx, projectID)) - } else { - // Treat everything else as plain string for now. - // TODO(songy23): add support for dropped label attachments. - pbAttachments = append(pbAttachments, toPbStringAttachment(v)) - } - } - return pbAttachments -} - -func toPbStringAttachment(v interface{}) *any.Any { - s := fmt.Sprintf("%v", v) - return &any.Any{ - TypeUrl: exemplarAttachmentTypeString, - Value: []byte(s), - } -} - -func toPbSpanCtxAttachment(spanCtx trace.SpanContext, projectID string) *any.Any { - pbSpanCtx := monitoringpb.SpanContext{ - SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, spanCtx.TraceID.String(), spanCtx.SpanID.String()), - } - bytes, _ := proto.Marshal(&pbSpanCtx) - return &any.Any{ - TypeUrl: exemplarAttachmentTypeSpanCtx, - Value: bytes, - } -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go deleted file mode 100644 index 628471e9ed..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "fmt" - "regexp" - "strconv" - "strings" - "sync" - "time" - - monitoring "cloud.google.com/go/monitoring/apiv3" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" -) - -const ( - minNumWorkers = 1 - minReqsChanSize = 5 -) - -type metricsBatcher struct { - projectName string - allTss []*monitoringpb.TimeSeries - allErrs []error - - // Counts all dropped TimeSeries by this metricsBatcher. - droppedTimeSeries int - - workers []*worker - // reqsChan, respsChan and wg are shared between metricsBatcher and worker goroutines. 
- reqsChan chan *monitoringpb.CreateTimeSeriesRequest - respsChan chan *response - wg *sync.WaitGroup -} - -func newMetricsBatcher(ctx context.Context, projectID string, numWorkers int, mc *monitoring.MetricClient, timeout time.Duration) *metricsBatcher { - if numWorkers < minNumWorkers { - numWorkers = minNumWorkers - } - workers := make([]*worker, 0, numWorkers) - reqsChanSize := numWorkers - if reqsChanSize < minReqsChanSize { - reqsChanSize = minReqsChanSize - } - reqsChan := make(chan *monitoringpb.CreateTimeSeriesRequest, reqsChanSize) - respsChan := make(chan *response, numWorkers) - var wg sync.WaitGroup - wg.Add(numWorkers) - for i := 0; i < numWorkers; i++ { - w := newWorker(ctx, mc, reqsChan, respsChan, &wg, timeout) - workers = append(workers, w) - go w.start() - } - return &metricsBatcher{ - projectName: fmt.Sprintf("projects/%s", projectID), - allTss: make([]*monitoringpb.TimeSeries, 0, maxTimeSeriesPerUpload), - droppedTimeSeries: 0, - workers: workers, - wg: &wg, - reqsChan: reqsChan, - respsChan: respsChan, - } -} - -func (mb *metricsBatcher) recordDroppedTimeseries(numTimeSeries int, errs ...error) { - mb.droppedTimeSeries += numTimeSeries - for _, err := range errs { - if err != nil { - mb.allErrs = append(mb.allErrs, err) - } - } -} - -func (mb *metricsBatcher) addTimeSeries(ts *monitoringpb.TimeSeries) { - mb.allTss = append(mb.allTss, ts) - if len(mb.allTss) == maxTimeSeriesPerUpload { - mb.sendReqToChan() - mb.allTss = make([]*monitoringpb.TimeSeries, 0, maxTimeSeriesPerUpload) - } -} - -func (mb *metricsBatcher) close(ctx context.Context) error { - // Send any remaining time series, must be <200 - if len(mb.allTss) > 0 { - mb.sendReqToChan() - } - - close(mb.reqsChan) - mb.wg.Wait() - for i := 0; i < len(mb.workers); i++ { - resp := <-mb.respsChan - mb.recordDroppedTimeseries(resp.droppedTimeSeries, resp.errs...) - } - close(mb.respsChan) - - numErrors := len(mb.allErrs) - if numErrors == 0 { - return nil - } - - if numErrors == 1 { - return mb.allErrs[0] - } - - errMsgs := make([]string, 0, numErrors) - for _, err := range mb.allErrs { - errMsgs = append(errMsgs, err.Error()) - } - return fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) -} - -// sendReqToChan grabs all the timeseies in this metricsBatcher, puts them -// to a CreateTimeSeriesRequest and sends the request to reqsChan. -func (mb *metricsBatcher) sendReqToChan() { - req := &monitoringpb.CreateTimeSeriesRequest{ - Name: mb.projectName, - TimeSeries: mb.allTss, - } - mb.reqsChan <- req -} - -// regex to extract min-max ranges from error response strings in the format "timeSeries[(min-max,...)] ..." (max is optional) -var timeSeriesErrRegex = regexp.MustCompile(`: timeSeries\[([0-9]+(?:-[0-9]+)?(?:,[0-9]+(?:-[0-9]+)?)*)\]`) - -// sendReq sends create time series requests to Stackdriver, -// and returns the count of dropped time series and error. 
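sendReq, whose body follows, works out how many TimeSeries a partially failed CreateTimeSeries call dropped by pulling index ranges such as timeSeries[0-1,4] out of the error text with the regular expression declared above. A standalone version of that parsing is sketched below; the regex is copied from that declaration, and the sample error message is made up for illustration.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// rangeRe matches the "timeSeries[...]" index list that Stackdriver appends
// to partial-failure errors; group 1 is a comma-separated list of single
// indices or min-max ranges.
var rangeRe = regexp.MustCompile(`: timeSeries\[([0-9]+(?:-[0-9]+)?(?:,[0-9]+(?:-[0-9]+)?)*)\]`)

// countDropped sums the sizes of every matched range.
func countDropped(errMsg string) int {
	dropped := 0
	for _, m := range rangeRe.FindAllStringSubmatch(errMsg, -1) {
		for _, rng := range strings.Split(m[1], ",") {
			parts := strings.Split(rng, "-")
			min, _ := strconv.Atoi(parts[0])
			max := min
			if len(parts) > 1 {
				max, _ = strconv.Atoi(parts[1])
			}
			dropped += max - min + 1
		}
	}
	return dropped
}

func main() {
	msg := "One or more TimeSeries could not be written: bad value: timeSeries[0-1,4]"
	fmt.Println(countDropped(msg)) // 3
}
```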
-func sendReq(ctx context.Context, c *monitoring.MetricClient, req *monitoringpb.CreateTimeSeriesRequest) (int, error) { - // c == nil only happens in unit tests where we don't make real calls to Stackdriver server - if c == nil { - return 0, nil - } - - err := createTimeSeries(ctx, c, req) - if err == nil { - return 0, nil - } - - droppedTimeSeriesRangeMatches := timeSeriesErrRegex.FindAllStringSubmatch(err.Error(), -1) - if !strings.HasPrefix(err.Error(), "One or more TimeSeries could not be written:") || len(droppedTimeSeriesRangeMatches) == 0 { - return len(req.TimeSeries), err - } - - dropped := 0 - for _, submatches := range droppedTimeSeriesRangeMatches { - for i := 1; i < len(submatches); i++ { - for _, rng := range strings.Split(submatches[i], ",") { - rngSlice := strings.Split(rng, "-") - - // strconv errors not possible due to regex above - min, _ := strconv.Atoi(rngSlice[0]) - max := min - if len(rngSlice) > 1 { - max, _ = strconv.Atoi(rngSlice[1]) - } - - dropped += max - min + 1 - } - } - } - return dropped, err -} - -type worker struct { - ctx context.Context - timeout time.Duration - mc *monitoring.MetricClient - - resp *response - - respsChan chan *response - reqsChan chan *monitoringpb.CreateTimeSeriesRequest - - wg *sync.WaitGroup -} - -func newWorker( - ctx context.Context, - mc *monitoring.MetricClient, - reqsChan chan *monitoringpb.CreateTimeSeriesRequest, - respsChan chan *response, - wg *sync.WaitGroup, - timeout time.Duration) *worker { - return &worker{ - ctx: ctx, - mc: mc, - resp: &response{}, - reqsChan: reqsChan, - respsChan: respsChan, - wg: wg, - } -} - -func (w *worker) start() { - for req := range w.reqsChan { - w.sendReqWithTimeout(req) - } - w.respsChan <- w.resp - w.wg.Done() -} - -func (w *worker) sendReqWithTimeout(req *monitoringpb.CreateTimeSeriesRequest) { - ctx, cancel := newContextWithTimeout(w.ctx, w.timeout) - defer cancel() - - w.recordDroppedTimeseries(sendReq(ctx, w.mc, req)) -} - -func (w *worker) recordDroppedTimeseries(numTimeSeries int, err error) { - w.resp.droppedTimeSeries += numTimeSeries - if err != nil { - w.resp.errs = append(w.resp.errs, err) - } -} - -type response struct { - droppedTimeSeries int - errs []error -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go deleted file mode 100644 index bcc1f0ee9f..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go +++ /dev/null @@ -1,569 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -/* -The code in this file is responsible for converting OpenCensus Proto metrics -directly to Stackdriver Metrics. 
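Editor's note: the sendReq helper above recovers the number of dropped points by parsing index ranges such as timeSeries[0-1,4] out of the CreateTimeSeries error text. A standalone sketch of that parsing step using the same regular expression; the sample error message is invented for illustration:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var timeSeriesErrRegex = regexp.MustCompile(`: timeSeries\[([0-9]+(?:-[0-9]+)?(?:,[0-9]+(?:-[0-9]+)?)*)\]`)

// countDropped sums the sizes of all index ranges named in the error text.
func countDropped(errMsg string) int {
	dropped := 0
	for _, submatches := range timeSeriesErrRegex.FindAllStringSubmatch(errMsg, -1) {
		for _, rng := range strings.Split(submatches[1], ",") {
			parts := strings.Split(rng, "-")
			min, _ := strconv.Atoi(parts[0]) // the regex guarantees digits
			max := min
			if len(parts) > 1 {
				max, _ = strconv.Atoi(parts[1])
			}
			dropped += max - min + 1
		}
	}
	return dropped
}

func main() {
	// Hypothetical error text in the shape the deleted code expects.
	msg := "One or more TimeSeries could not be written: Points must be written in order: timeSeries[0-1,4]"
	fmt.Println(countDropped(msg)) // 3 (indexes 0, 1 and 4)
}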
-*/ - -import ( - "context" - "errors" - "fmt" - "path" - "strings" - - "go.opencensus.io/resource" - - commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - labelpb "google.golang.org/genproto/googleapis/api/label" - googlemetricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" -) - -var errNilMetricOrMetricDescriptor = errors.New("non-nil metric or metric descriptor") -var percentileLabelKey = &metricspb.LabelKey{ - Key: "percentile", - Description: "the value at a given percentile of a distribution", -} -var globalResource = &resource.Resource{Type: "global"} -var domains = []string{"googleapis.com", "kubernetes.io", "istio.io", "knative.dev"} - -// PushMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring synchronously, -// without de-duping or adding proto metrics to the bundler. -func (se *statsExporter) PushMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) (int, error) { - if len(metrics) == 0 { - return 0, errNilMetricOrMetricDescriptor - } - - // Caches the resources seen so far - seenResources := make(map[*resourcepb.Resource]*monitoredrespb.MonitoredResource) - - mb := newMetricsBatcher(ctx, se.o.ProjectID, se.o.NumberOfWorkers, se.c, se.o.Timeout) - for _, metric := range metrics { - if len(metric.GetTimeseries()) == 0 { - // No TimeSeries to export, skip this metric. 
- continue - } - mappedRsc := se.getResource(rsc, metric, seenResources) - if metric.GetMetricDescriptor().GetType() == metricspb.MetricDescriptor_SUMMARY { - summaryMtcs := se.convertSummaryMetrics(metric) - for _, summaryMtc := range summaryMtcs { - if err := se.createMetricDescriptorFromMetricProto(ctx, summaryMtc); err != nil { - mb.recordDroppedTimeseries(len(summaryMtc.GetTimeseries()), err) - continue - } - se.protoMetricToTimeSeries(ctx, mappedRsc, summaryMtc, mb) - } - } else { - if err := se.createMetricDescriptorFromMetricProto(ctx, metric); err != nil { - mb.recordDroppedTimeseries(len(metric.GetTimeseries()), err) - continue - } - se.protoMetricToTimeSeries(ctx, mappedRsc, metric, mb) - } - } - - return mb.droppedTimeSeries, mb.close(ctx) -} - -func (se *statsExporter) convertSummaryMetrics(summary *metricspb.Metric) []*metricspb.Metric { - var metrics []*metricspb.Metric - - for _, ts := range summary.Timeseries { - var percentileTss []*metricspb.TimeSeries - var countTss []*metricspb.TimeSeries - var sumTss []*metricspb.TimeSeries - lvs := ts.GetLabelValues() - - startTime := ts.StartTimestamp - for _, pt := range ts.GetPoints() { - ptTimestamp := pt.GetTimestamp() - summaryValue := pt.GetSummaryValue() - if summaryValue.Sum != nil { - sumTs := &metricspb.TimeSeries{ - LabelValues: lvs, - StartTimestamp: startTime, - Points: []*metricspb.Point{ - { - Value: &metricspb.Point_DoubleValue{ - DoubleValue: summaryValue.Sum.Value, - }, - Timestamp: ptTimestamp, - }, - }, - } - sumTss = append(sumTss, sumTs) - } - - if summaryValue.Count != nil { - countTs := &metricspb.TimeSeries{ - LabelValues: lvs, - StartTimestamp: startTime, - Points: []*metricspb.Point{ - { - Value: &metricspb.Point_Int64Value{ - Int64Value: summaryValue.Count.Value, - }, - Timestamp: ptTimestamp, - }, - }, - } - countTss = append(countTss, countTs) - } - - snapshot := summaryValue.GetSnapshot() - for _, percentileValue := range snapshot.GetPercentileValues() { - lvsWithPercentile := lvs[0:] - lvsWithPercentile = append(lvsWithPercentile, &metricspb.LabelValue{ - HasValue: true, - Value: fmt.Sprintf("%f", percentileValue.Percentile), - }) - percentileTs := &metricspb.TimeSeries{ - LabelValues: lvsWithPercentile, - StartTimestamp: nil, - Points: []*metricspb.Point{ - { - Value: &metricspb.Point_DoubleValue{ - DoubleValue: percentileValue.Value, - }, - Timestamp: ptTimestamp, - }, - }, - } - percentileTss = append(percentileTss, percentileTs) - } - } - - if len(sumTss) > 0 { - metric := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: fmt.Sprintf("%s_summary_sum", summary.GetMetricDescriptor().GetName()), - Description: summary.GetMetricDescriptor().GetDescription(), - Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, - Unit: summary.GetMetricDescriptor().GetUnit(), - LabelKeys: summary.GetMetricDescriptor().GetLabelKeys(), - }, - Timeseries: sumTss, - Resource: summary.Resource, - } - metrics = append(metrics, metric) - } - if len(countTss) > 0 { - metric := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: fmt.Sprintf("%s_summary_count", summary.GetMetricDescriptor().GetName()), - Description: summary.GetMetricDescriptor().GetDescription(), - Type: metricspb.MetricDescriptor_CUMULATIVE_INT64, - Unit: "1", - LabelKeys: summary.GetMetricDescriptor().GetLabelKeys(), - }, - Timeseries: countTss, - Resource: summary.Resource, - } - metrics = append(metrics, metric) - } - if len(percentileTss) > 0 { - lks := summary.GetMetricDescriptor().GetLabelKeys()[0:] - 
lks = append(lks, percentileLabelKey) - metric := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: fmt.Sprintf("%s_summary_percentile", summary.GetMetricDescriptor().GetName()), - Description: summary.GetMetricDescriptor().GetDescription(), - Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, - Unit: summary.GetMetricDescriptor().GetUnit(), - LabelKeys: lks, - }, - Timeseries: percentileTss, - Resource: summary.Resource, - } - metrics = append(metrics, metric) - } - } - return metrics -} - -func (se *statsExporter) getResource(rsc *resourcepb.Resource, metric *metricspb.Metric, seenRscs map[*resourcepb.Resource]*monitoredrespb.MonitoredResource) *monitoredrespb.MonitoredResource { - var resource = rsc - if metric.Resource != nil { - resource = metric.Resource - } - mappedRsc, ok := seenRscs[resource] - if !ok { - mappedRsc = se.o.MapResource(resourcepbToResource(resource)) - seenRscs[resource] = mappedRsc - } - return mappedRsc -} - -func resourcepbToResource(rsc *resourcepb.Resource) *resource.Resource { - if rsc == nil { - return globalResource - } - res := &resource.Resource{ - Type: rsc.Type, - Labels: make(map[string]string, len(rsc.Labels)), - } - - for k, v := range rsc.Labels { - res.Labels[k] = v - } - return res -} - -// protoMetricToTimeSeries converts a metric into a Stackdriver Monitoring v3 API CreateTimeSeriesRequest -// but it doesn't invoke any remote API. -func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, mappedRsc *monitoredrespb.MonitoredResource, metric *metricspb.Metric, mb *metricsBatcher) { - if metric == nil || metric.MetricDescriptor == nil { - mb.recordDroppedTimeseries(len(metric.GetTimeseries()), errNilMetricOrMetricDescriptor) - } - - metricType := se.metricTypeFromProto(metric.GetMetricDescriptor().GetName()) - metricLabelKeys := metric.GetMetricDescriptor().GetLabelKeys() - metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric) - labelKeys := make([]string, 0, len(metricLabelKeys)) - for _, key := range metricLabelKeys { - labelKeys = append(labelKeys, sanitize(key.GetKey())) - } - - for _, protoTimeSeries := range metric.Timeseries { - if len(protoTimeSeries.Points) == 0 { - // No points to send just move forward. - continue - } - - sdPoints, err := se.protoTimeSeriesToMonitoringPoints(protoTimeSeries, metricKind) - if err != nil { - mb.recordDroppedTimeseries(1, err) - continue - } - - // Each TimeSeries has labelValues which MUST be correlated - // with that from the MetricDescriptor - labels, err := labelsPerTimeSeries(se.defaultLabels, labelKeys, protoTimeSeries.GetLabelValues()) - if err != nil { - mb.recordDroppedTimeseries(1, err) - continue - } - mb.addTimeSeries(&monitoringpb.TimeSeries{ - Metric: &googlemetricpb.Metric{ - Type: metricType, - Labels: labels, - }, - MetricKind: metricKind, - ValueType: valueType, - Resource: mappedRsc, - Points: sdPoints, - }) - } -} - -func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []string, labelValues []*metricspb.LabelValue) (map[string]string, error) { - if len(labelKeys) != len(labelValues) { - return nil, fmt.Errorf("length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues)) - } - - if len(defaults)+len(labelKeys) == 0 { - // No labels for this metric - return nil, nil - } - - labels := make(map[string]string) - // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched. 
- for key, label := range defaults { - labels[key] = label.val - } - - for i, labelKey := range labelKeys { - labelValue := labelValues[i] - if !labelValue.GetHasValue() { - continue - } - labels[labelKey] = labelValue.GetValue() - } - - return labels, nil -} - -func (se *statsExporter) createMetricDescriptorFromMetricProto(ctx context.Context, metric *metricspb.Metric) error { - // Skip create metric descriptor if configured - if se.o.SkipCMD { - return nil - } - - ctx, cancel := newContextWithTimeout(ctx, se.o.Timeout) - defer cancel() - - se.protoMu.Lock() - defer se.protoMu.Unlock() - - name := metric.GetMetricDescriptor().GetName() - if _, created := se.protoMetricDescriptors[name]; created { - return nil - } - - if builtinMetric(se.metricTypeFromProto(name)) { - se.protoMetricDescriptors[name] = true - return nil - } - - // Otherwise, we encountered a cache-miss and - // should create the metric descriptor remotely. - inMD, err := se.protoToMonitoringMetricDescriptor(metric, se.defaultLabels) - if err != nil { - return err - } - - if err = se.createMetricDescriptor(ctx, inMD); err != nil { - return err - } - - se.protoMetricDescriptors[name] = true - return nil -} - -func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) ([]*monitoringpb.Point, error) { - sptl := make([]*monitoringpb.Point, 0, len(ts.Points)) - for _, pt := range ts.Points { - // If we have a last value aggregation point i.e. MetricDescriptor_GAUGE - // StartTime should be nil. - startTime := ts.StartTimestamp - if metricKind == googlemetricpb.MetricDescriptor_GAUGE { - startTime = nil - } - spt, err := fromProtoPoint(startTime, pt) - if err != nil { - return nil, err - } - sptl = append(sptl, spt) - } - return sptl, nil -} - -func (se *statsExporter) protoToMonitoringMetricDescriptor(metric *metricspb.Metric, additionalLabels map[string]labelValue) (*googlemetricpb.MetricDescriptor, error) { - if metric == nil || metric.MetricDescriptor == nil { - return nil, errNilMetricOrMetricDescriptor - } - - md := metric.GetMetricDescriptor() - metricName := md.GetName() - unit := md.GetUnit() - description := md.GetDescription() - metricType := se.metricTypeFromProto(metricName) - displayName := se.displayName(metricName) - metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric) - - sdm := &googlemetricpb.MetricDescriptor{ - Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType), - DisplayName: displayName, - Description: description, - Unit: unit, - Type: metricType, - MetricKind: metricKind, - ValueType: valueType, - Labels: labelDescriptorsFromProto(additionalLabels, metric.GetMetricDescriptor().GetLabelKeys()), - } - - return sdm, nil -} - -func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []*metricspb.LabelKey) []*labelpb.LabelDescriptor { - labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(protoLabelKeys)) - - // Fill in the defaults first. - for key, lbl := range defaults { - labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ - Key: sanitize(key), - Description: lbl.desc, - ValueType: labelpb.LabelDescriptor_STRING, - }) - } - - // Now fill in those from the metric. 
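Editor's note: labelsPerTimeSeries above folds the exporter's default labels into each series and then overlays the per-series OpenCensus label values, skipping entries that carry no value. A stripped-down sketch of that merge, with plain strings standing in for the proto label types:

package main

import (
	"errors"
	"fmt"
)

// mergeLabels mirrors the shape of the deleted labelsPerTimeSeries helper:
// defaults first, then per-series values keyed positionally, unset values skipped.
func mergeLabels(defaults map[string]string, keys []string, values []*string) (map[string]string, error) {
	if len(keys) != len(values) {
		return nil, errors.New("label keys and values length mismatch")
	}
	if len(defaults)+len(keys) == 0 {
		return nil, nil // no labels at all for this metric
	}
	labels := make(map[string]string)
	for k, v := range defaults {
		labels[k] = v
	}
	for i, k := range keys {
		if values[i] == nil { // a nil entry plays the role of HasValue == false
			continue
		}
		labels[k] = *values[i]
	}
	return labels, nil
}

func main() {
	env := "prod"
	labels, _ := mergeLabels(
		map[string]string{"opencensus_task": "go-1234@host"},
		[]string{"environment", "region"},
		[]*string{&env, nil}, // "region" has no value and is dropped
	)
	fmt.Println(labels) // map[environment:prod opencensus_task:go-1234@host]
}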
- for _, protoKey := range protoLabelKeys { - labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ - Key: sanitize(protoKey.GetKey()), - Description: protoKey.GetDescription(), - ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags - }) - } - return labelDescriptors -} - -func (se *statsExporter) metricTypeFromProto(name string) string { - prefix := se.o.MetricPrefix - if se.o.GetMetricPrefix != nil { - prefix = se.o.GetMetricPrefix(name) - } - if prefix != "" { - name = path.Join(prefix, name) - } - if !hasDomain(name) { - // Still needed because the name may or may not have a "/" at the beginning. - name = path.Join(defaultDomain, name) - } - return name -} - -// hasDomain checks if the metric name already has a domain in it. -func hasDomain(name string) bool { - for _, domain := range domains { - if strings.Contains(name, domain) { - return true - } - } - return false -} - -func fromProtoPoint(startTime *timestamppb.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) { - if pt == nil { - return nil, nil - } - - mptv, err := protoToMetricPoint(pt.Value) - if err != nil { - return nil, err - } - - return &monitoringpb.Point{ - Value: mptv, - Interval: &monitoringpb.TimeInterval{ - StartTime: startTime, - EndTime: pt.Timestamp, - }, - }, nil -} - -func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) { - if value == nil { - return nil, nil - } - - switch v := value.(type) { - default: - // All the other types are not yet handled. - // TODO: (@odeke-em, @songy23) talk to the Stackdriver team to determine - // the use cases for: - // - // *TypedValue_BoolValue - // *TypedValue_StringValue - // - // and then file feature requests on OpenCensus-Specs and then OpenCensus-Proto, - // lest we shall error here. - // - // TODO: Add conversion from SummaryValue when - // https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/66 - // has been figured out. - return nil, fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value) - - case *metricspb.Point_Int64Value: - return &monitoringpb.TypedValue{ - Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: v.Int64Value, - }, - }, nil - - case *metricspb.Point_DoubleValue: - return &monitoringpb.TypedValue{ - Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.DoubleValue, - }, - }, nil - - case *metricspb.Point_DistributionValue: - dv := v.DistributionValue - var mv *monitoringpb.TypedValue_DistributionValue - if dv != nil { - var mean float64 - if dv.Count > 0 { - mean = float64(dv.Sum) / float64(dv.Count) - } - mv = &monitoringpb.TypedValue_DistributionValue{ - DistributionValue: &distributionpb.Distribution{ - Count: dv.Count, - Mean: mean, - SumOfSquaredDeviation: dv.SumOfSquaredDeviation, - }, - } - - insertZeroBound := false - if bopts := dv.BucketOptions; bopts != nil && bopts.Type != nil { - bexp, ok := bopts.Type.(*metricspb.DistributionValue_BucketOptions_Explicit_) - if ok && bexp != nil && bexp.Explicit != nil { - insertZeroBound = shouldInsertZeroBound(bexp.Explicit.Bounds...) 
- mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{ - Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ - ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ - // The first bucket bound should be 0.0 because the Metrics first bucket is - // [0, first_bound) but Stackdriver monitoring bucket bounds begin with -infinity - // (first bucket is (-infinity, 0)) - Bounds: addZeroBoundOnCondition(insertZeroBound, bexp.Explicit.Bounds...), - }, - }, - } - } - } - mv.DistributionValue.BucketCounts = addZeroBucketCountOnCondition(insertZeroBound, bucketCounts(dv.Buckets)...) - - } - return &monitoringpb.TypedValue{Value: mv}, nil - } -} - -func bucketCounts(buckets []*metricspb.DistributionValue_Bucket) []int64 { - bucketCounts := make([]int64, len(buckets)) - for i, bucket := range buckets { - if bucket != nil { - bucketCounts[i] = bucket.Count - } - } - return bucketCounts -} - -func protoMetricDescriptorTypeToMetricKind(m *metricspb.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) { - dt := m.GetMetricDescriptor() - if dt == nil { - return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED - } - - switch dt.Type { - case metricspb.MetricDescriptor_CUMULATIVE_INT64: - return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64 - - case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE: - return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE - - case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION: - return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION - - case metricspb.MetricDescriptor_GAUGE_DOUBLE: - return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE - - case metricspb.MetricDescriptor_GAUGE_INT64: - return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 - - case metricspb.MetricDescriptor_GAUGE_DISTRIBUTION: - return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION - - default: - return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED - } -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws/aws_identity_doc_utils.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws/aws_identity_doc_utils.go deleted file mode 100644 index f83b402724..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws/aws_identity_doc_utils.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aws - -import ( - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" -) - -// awsIdentityDocument is used to store parsed AWS Identity Document. 
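Editor's note: the bucket conversion above relies on shouldInsertZeroBound, addZeroBoundOnCondition and addZeroBucketCountOnCondition, which live elsewhere in the exporter and are not part of this hunk. A sketch of the behaviour those helpers are understood to implement, following the comment's reasoning (prepend a zero bound and an empty bucket when the first explicit bound is positive, so Stackdriver's leading (-infinity, 0) bucket exists):

package main

import "fmt"

// alignBuckets prepends an implicit zero bound and an empty bucket count when the
// first explicit bound is positive, so that OpenCensus buckets ([0, b0), [b0, b1), ...)
// line up with Stackdriver buckets ((-inf, bounds[0]), [bounds[0], bounds[1]), ...).
// This mirrors what the exporter's zero-bound helpers appear to do; treat it as a sketch.
func alignBuckets(bounds []float64, counts []int64) ([]float64, []int64) {
	if len(bounds) > 0 && bounds[0] > 0 {
		bounds = append([]float64{0}, bounds...)
		counts = append([]int64{0}, counts...)
	}
	return bounds, counts
}

func main() {
	bounds, counts := alignBuckets([]float64{5, 10, 25}, []int64{2, 7, 1, 0})
	fmt.Println(bounds) // [0 5 10 25]
	fmt.Println(counts) // [0 2 7 1 0]
}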
-type awsIdentityDocument struct { - // accountID is the AWS account number for the VM. - accountID string - - // instanceID is the instance id of the instance. - instanceID string - - // Region is the AWS region for the VM. - region string -} - -// retrieveAWSIdentityDocument attempts to retrieve AWS Identity Document. -// If the environment is AWS EC2 Instance then a valid document is retrieved. -// Relevant attributes from the document are stored in awsIdentityDoc. -// This is only done once. -func retrieveAWSIdentityDocument() *awsIdentityDocument { - awsIdentityDoc := awsIdentityDocument{} - sesion, err := session.NewSession() - if err != nil { - return nil - } - c := ec2metadata.New(sesion) - if !c.Available() { - return nil - } - ec2InstanceIdentifyDocument, err := c.GetInstanceIdentityDocument() - if err != nil { - return nil - } - awsIdentityDoc.region = ec2InstanceIdentifyDocument.Region - awsIdentityDoc.instanceID = ec2InstanceIdentifyDocument.InstanceID - awsIdentityDoc.accountID = ec2InstanceIdentifyDocument.AccountID - - return &awsIdentityDoc -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws/monitored_resources.go deleted file mode 100644 index 5e4cde50e2..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws/monitored_resources.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aws - -import ( - "fmt" - "sync" -) - -// Interface is a type that represent monitor resource that satisfies monitoredresource.Interface -type Interface interface { - - // MonitoredResource returns the resource type and resource labels. - MonitoredResource() (resType string, labels map[string]string) -} - -// EC2Instance represents aws_ec2_instance type monitored resource. -// For definition refer to -// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance -type EC2Instance struct { - - // AWSAccount is the AWS account number for the VM. - AWSAccount string - - // InstanceID is the instance id of the instance. - InstanceID string - - // Region is the AWS region for the VM. The format of this field is "aws:{region}", - // where supported values for {region} are listed at - // http://docs.aws.amazon.com/general/latest/gr/rande.html. - Region string -} - -// MonitoredResource returns resource type and resource labels for AWSEC2Instance -func (aws *EC2Instance) MonitoredResource() (resType string, labels map[string]string) { - labels = map[string]string{ - "aws_account": aws.AWSAccount, - "instance_id": aws.InstanceID, - "region": aws.Region, - } - return "aws_ec2_instance", labels -} - -// Autodetect auto detects monitored resources based on -// the environment where the application is running. -// It supports detection of following resource types -// 1. 
aws_ec2_instance: -// -// Returns MonitoredResInterface which implements getLabels() and getType() -// For resource definition go to https://cloud.google.com/monitoring/api/resources -func Autodetect() Interface { - return func() Interface { - detectOnce.Do(func() { - autoDetected = detectResourceType(retrieveAWSIdentityDocument()) - }) - return autoDetected - }() - -} - -// createAWSEC2InstanceMonitoredResource creates a aws_ec2_instance monitored resource -// awsIdentityDoc contains AWS EC2 specific attributes. -func createEC2InstanceMonitoredResource(awsIdentityDoc *awsIdentityDocument) *EC2Instance { - awsInstance := EC2Instance{ - AWSAccount: awsIdentityDoc.accountID, - InstanceID: awsIdentityDoc.instanceID, - Region: fmt.Sprintf("aws:%s", awsIdentityDoc.region), - } - return &awsInstance -} - -// detectOnce is used to make sure AWS metadata detect function executes only once. -var detectOnce sync.Once - -// autoDetected is the metadata detected after the first execution of Autodetect function. -var autoDetected Interface - -// detectResourceType determines the resource type. -// awsIdentityDoc contains AWS EC2 attributes. nil if it is not AWS EC2 environment -func detectResourceType(awsIdentityDoc *awsIdentityDocument) Interface { - if awsIdentityDoc != nil { - return createEC2InstanceMonitoredResource(awsIdentityDoc) - } - return nil -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/deprecated.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/deprecated.go deleted file mode 100644 index fec5ec1251..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/deprecated.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monitoredresource - -import ( - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws" - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp" -) - -// GKEContainer represents gke_container type monitored resource. -// For definition refer to -// https://cloud.google.com/monitoring/api/resources#tag_gke_container -// Deprecated: please use gcp.GKEContainer from "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp". -type GKEContainer struct { - // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project". - ProjectID string - - // InstanceID is the numeric VM instance identifier assigned by Compute Engine. - InstanceID string - - // ClusterName is the name for the cluster the container is running in. - ClusterName string - - // ContainerName is the name of the container. - ContainerName string - - // NamespaceID is the identifier for the cluster namespace the container is running in - NamespaceID string - - // PodID is the identifier for the pod the container is running in. - PodID string - - // Zone is the Compute Engine zone in which the VM is running. 
- Zone string - - // LoggingMonitoringV2Enabled is the identifier if user enabled V2 logging and monitoring for GKE - LoggingMonitoringV2Enabled bool -} - -// MonitoredResource returns resource type and resource labels for GKEContainer -func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) { - gcpGKE := gcp.GKEContainer(*gke) - return gcpGKE.MonitoredResource() -} - -// GCEInstance represents gce_instance type monitored resource. -// For definition refer to -// https://cloud.google.com/monitoring/api/resources#tag_gce_instance -// Deprecated: please use gcp.GCEInstance from "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp". -type GCEInstance struct { - - // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project". - ProjectID string - - // InstanceID is the numeric VM instance identifier assigned by Compute Engine. - InstanceID string - - // Zone is the Compute Engine zone in which the VM is running. - Zone string -} - -// MonitoredResource returns resource type and resource labels for GCEInstance -func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) { - gcpGCE := gcp.GCEInstance(*gce) - return gcpGCE.MonitoredResource() -} - -// AWSEC2Instance represents aws_ec2_instance type monitored resource. -// For definition refer to -// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance -// Deprecated: please use aws.EC2Container from "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws". -type AWSEC2Instance struct { - // AWSAccount is the AWS account number for the VM. - AWSAccount string - - // InstanceID is the instance id of the instance. - InstanceID string - - // Region is the AWS region for the VM. The format of this field is "aws:{region}", - // where supported values for {region} are listed at - // http://docs.aws.amazon.com/general/latest/gr/rande.html. - Region string -} - -// MonitoredResource returns resource type and resource labels for AWSEC2Instance -func (ec2 *AWSEC2Instance) MonitoredResource() (resType string, labels map[string]string) { - awsEC2 := aws.EC2Instance(*ec2) - return awsEC2.MonitoredResource() -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp/gcp_metadata_config.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp/gcp_metadata_config.go deleted file mode 100644 index ccaade65f2..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp/gcp_metadata_config.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gcp - -import ( - "context" - "fmt" - "log" - "os" - "strings" - - "cloud.google.com/go/compute/metadata" - container "cloud.google.com/go/container/apiv1" - containerpb "google.golang.org/genproto/googleapis/container/v1" -) - -// gcpMetadata represents metadata retrieved from GCP (GKE and GCE) environment. 
-type gcpMetadata struct { - - // projectID is the identifier of the GCP project associated with this resource, such as "my-project". - projectID string - - // instanceID is the numeric VM instance identifier assigned by Compute Engine. - instanceID string - - // clusterName is the name for the cluster the container is running in. - clusterName string - - // containerName is the name of the container. - containerName string - - // namespaceID is the identifier for the cluster namespace the container is running in - namespaceID string - - // podID is the identifier for the pod the container is running in. - podID string - - // zone is the Compute Engine zone in which the VM is running. - zone string - - monitoringV2 bool -} - -// retrieveGCPMetadata retrieves value of each Attribute from Metadata Server -// in GKE container and GCE instance environment. -// Some attributes are retrieved from the system environment. -// This is only executed detectOnce. -func retrieveGCPMetadata() *gcpMetadata { - gcpMetadata := gcpMetadata{} - var err error - gcpMetadata.instanceID, err = metadata.InstanceID() - if err != nil { - // Not a GCP environment - return &gcpMetadata - } - - gcpMetadata.projectID, err = metadata.ProjectID() - logError(err) - - gcpMetadata.zone, err = metadata.Zone() - logError(err) - - clusterName, err := metadata.InstanceAttributeValue("cluster-name") - logError(err) - gcpMetadata.clusterName = strings.TrimSpace(clusterName) - - clusterLocation, err := metadata.InstanceAttributeValue("cluster-location") - logError(err) - - // Following attributes are derived from environment variables. They are configured - // via yaml file. For details refer to: - // https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application - gcpMetadata.namespaceID = os.Getenv("NAMESPACE") - gcpMetadata.containerName = os.Getenv("CONTAINER_NAME") - gcpMetadata.podID = os.Getenv("HOSTNAME") - - // Monitoring API version can be obtained from cluster info.q - if gcpMetadata.clusterName != "" { - ctx := context.Background() - c, err := container.NewClusterManagerClient(ctx) - logError(err) - if c != nil { - req := &containerpb.GetClusterRequest{ - Name: fmt.Sprintf("projects/%s/locations/%s/clusters/%s", gcpMetadata.projectID, strings.TrimSpace(clusterLocation), gcpMetadata.clusterName), - } - resp, err := c.GetCluster(ctx, req) - logError(err) - if resp != nil && resp.GetMonitoringService() == "monitoring.googleapis.com/kubernetes" && - resp.GetLoggingService() == "logging.googleapis.com/kubernetes" { - gcpMetadata.monitoringV2 = true - } - } - } - - return &gcpMetadata -} - -// logError logs error only if the error is present and it is not 'not defined' -func logError(err error) { - if err != nil { - if !strings.Contains(err.Error(), "not defined") { - log.Printf("Error retrieving gcp metadata: %v", err) - } - } -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp/monitored_resources.go deleted file mode 100644 index 1a08b1c8e8..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp/monitored_resources.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gcp - -import ( - "os" - "sync" -) - -// Interface is a type that represent monitor resource that satisfies monitoredresource.Interface -type Interface interface { - - // MonitoredResource returns the resource type and resource labels. - MonitoredResource() (resType string, labels map[string]string) -} - -// GKEContainer represents gke_container type monitored resource. -// For definition refer to -// https://cloud.google.com/monitoring/api/resources#tag_gke_container -type GKEContainer struct { - - // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project". - ProjectID string - - // InstanceID is the numeric VM instance identifier assigned by Compute Engine. - InstanceID string - - // ClusterName is the name for the cluster the container is running in. - ClusterName string - - // ContainerName is the name of the container. - ContainerName string - - // NamespaceID is the identifier for the cluster namespace the container is running in - NamespaceID string - - // PodID is the identifier for the pod the container is running in. - PodID string - - // Zone is the Compute Engine zone in which the VM is running. - Zone string - - // LoggingMonitoringV2Enabled is the identifier if user enabled V2 logging and monitoring for GKE - LoggingMonitoringV2Enabled bool -} - -// MonitoredResource returns resource type and resource labels for GKEContainer -func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) { - labels = map[string]string{ - "project_id": gke.ProjectID, - "cluster_name": gke.ClusterName, - "container_name": gke.ContainerName, - } - var typ string - if gke.LoggingMonitoringV2Enabled { - typ = "k8s_container" - labels["pod_name"] = gke.PodID - labels["namespace_name"] = gke.NamespaceID - labels["location"] = gke.Zone - } else { - typ = "gke_container" - labels["pod_id"] = gke.PodID - labels["namespace_id"] = gke.NamespaceID - labels["zone"] = gke.Zone - labels["instance_id"] = gke.InstanceID - } - return typ, labels -} - -// GCEInstance represents gce_instance type monitored resource. -// For definition refer to -// https://cloud.google.com/monitoring/api/resources#tag_gce_instance -type GCEInstance struct { - - // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project". - ProjectID string - - // InstanceID is the numeric VM instance identifier assigned by Compute Engine. - InstanceID string - - // Zone is the Compute Engine zone in which the VM is running. - Zone string -} - -// MonitoredResource returns resource type and resource labels for GCEInstance -func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) { - labels = map[string]string{ - "project_id": gce.ProjectID, - "instance_id": gce.InstanceID, - "zone": gce.Zone, - } - return "gce_instance", labels -} - -// Autodetect auto detects monitored resources based on -// the environment where the application is running. -// It supports detection of following resource types -// 1. gke_container: -// 2. 
gce_instance: -// -// Returns MonitoredResInterface which implements getLabels() and getType() -// For resource definition go to https://cloud.google.com/monitoring/api/resources -func Autodetect() Interface { - return func() Interface { - detectOnce.Do(func() { - autoDetected = detectResourceType(retrieveGCPMetadata()) - }) - return autoDetected - }() - -} - -// createGCEInstanceMonitoredResource creates a gce_instance monitored resource -// gcpMetadata contains GCP (GKE or GCE) specific attributes. -func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance { - gceInstance := GCEInstance{ - ProjectID: gcpMetadata.projectID, - InstanceID: gcpMetadata.instanceID, - Zone: gcpMetadata.zone, - } - return &gceInstance -} - -// createGKEContainerMonitoredResource creates a gke_container monitored resource -// gcpMetadata contains GCP (GKE or GCE) specific attributes. -func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer { - gkeContainer := GKEContainer{ - ProjectID: gcpMetadata.projectID, - InstanceID: gcpMetadata.instanceID, - Zone: gcpMetadata.zone, - ContainerName: gcpMetadata.containerName, - ClusterName: gcpMetadata.clusterName, - NamespaceID: gcpMetadata.namespaceID, - PodID: gcpMetadata.podID, - LoggingMonitoringV2Enabled: gcpMetadata.monitoringV2, - } - return &gkeContainer -} - -// detectOnce is used to make sure GCP metadata detect function executes only once. -var detectOnce sync.Once - -// autoDetected is the metadata detected after the first execution of Autodetect function. -var autoDetected Interface - -// detectResourceType determines the resource type. -// gcpMetadata contains GCP (GKE or GCE) specific attributes. -func detectResourceType(gcpMetadata *gcpMetadata) Interface { - if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && - gcpMetadata != nil && gcpMetadata.instanceID != "" { - return createGKEContainerMonitoredResource(gcpMetadata) - } else if gcpMetadata != nil && gcpMetadata.instanceID != "" { - return createGCEInstanceMonitoredResource(gcpMetadata) - } - return nil -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go deleted file mode 100644 index 46943eb10a..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monitoredresource - -import ( - "sync" - - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws" - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp" -) - -// Interface is a type that represent monitor resource that satisfies monitoredresource.Interface -type Interface interface { - // MonitoredResource returns the resource type and resource labels. 
- MonitoredResource() (resType string, labels map[string]string) -} - -// Autodetect auto detects monitored resources based on -// the environment where the application is running. -// It supports detection of following resource types -// 1. gke_container: -// 2. gce_instance: -// 3. aws_ec2_instance: -// -// Returns MonitoredResInterface which implements getLabels() and getType() -// For resource definition go to https://cloud.google.com/monitoring/api/resources -func Autodetect() Interface { - return func() Interface { - detectOnce.Do(func() { - var awsDetect, gcpDetect Interface - // First attempts to retrieve AWS Identity Doc and GCP metadata. - // It then determines the resource type - // In GCP and AWS environment both func finishes quickly. However, - // in an environment other than those (e.g local laptop) it - // takes 2 seconds for GCP and 5-6 for AWS. - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - awsDetect = aws.Autodetect() - }() - go func() { - defer wg.Done() - gcpDetect = gcp.Autodetect() - }() - - wg.Wait() - autoDetected = awsDetect - if gcpDetect != nil { - autoDetected = gcpDetect - } - }) - return autoDetected - }() -} - -// detectOnce is used to make sure GCP and AWS metadata detect function executes only once. -var detectOnce sync.Once - -// autoDetected is the metadata detected after the first execution of Autodetect function. -var autoDetected Interface diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go deleted file mode 100644 index 50331d2948..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver" - -import ( - "fmt" - "sync" - - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp" - "go.opencensus.io/resource" - "go.opencensus.io/resource/resourcekeys" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" -) - -// Resource labels that are generally internal to the exporter. -// Consider exposing these labels and a type identifier in the future to allow -// for customization. 
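Editor's note: the monitoredresource package deleted above is a one-call autodetection entry point; a typical usage sketch against the API exactly as it appears in this hunk:

package main

import (
	"fmt"

	"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
)

func main() {
	// Autodetect probes the AWS and GCP metadata endpoints once and caches the result;
	// it returns nil when neither environment is detected (e.g. on a local workstation).
	mr := monitoredresource.Autodetect()
	if mr == nil {
		fmt.Println("no cloud monitored resource detected")
		return
	}
	resType, labels := mr.MonitoredResource()
	fmt.Println("type:", resType)
	for k, v := range labels {
		fmt.Printf("  %s=%s\n", k, v)
	}
}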
-const ( - stackdriverProjectID = "contrib.opencensus.io/exporter/stackdriver/project_id" - stackdriverLocation = "contrib.opencensus.io/exporter/stackdriver/location" - stackdriverClusterName = "contrib.opencesus.io/exporter/stackdriver/cluster_name" - stackdriverGenericTaskNamespace = "contrib.opencensus.io/exporter/stackdriver/generic_task/namespace" - stackdriverGenericTaskJob = "contrib.opencensus.io/exporter/stackdriver/generic_task/job" - stackdriverGenericTaskID = "contrib.opencensus.io/exporter/stackdriver/generic_task/task_id" - - knativeResType = "knative_revision" - knativeServiceName = "service_name" - knativeRevisionName = "revision_name" - knativeConfigurationName = "configuration_name" - knativeNamespaceName = "namespace_name" - - knativeBrokerType = "knative_broker" - knativeBrokerName = "broker_name" - knativeTriggerType = "knative_trigger" - knativeTriggerName = "trigger_name" - - appEngineInstanceType = "gae_instance" - - appEngineService = "appengine.service.id" - appEngineVersion = "appengine.version.id" - appEngineInstance = "appengine.instance.id" -) - -var ( - // autodetectFunc returns a monitored resource that is autodetected. - // from the cloud environment at runtime. - autodetectFunc func() gcp.Interface - - // autodetectOnce is used to lazy initialize autodetectedLabels. - autodetectOnce *sync.Once - // autodetectedLabels stores all the labels from the autodetected monitored resource - // with a possible additional label for the GCP "location". - autodetectedLabels map[string]string -) - -func init() { - autodetectFunc = gcp.Autodetect - // monitoredresource.Autodetect only makes calls to the metadata APIs once - // and caches the results - autodetectOnce = new(sync.Once) -} - -// Mappings for the well-known OpenCensus resource label keys -// to applicable Stackdriver Monitored Resource label keys. -var k8sContainerMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "cluster_name": resourcekeys.K8SKeyClusterName, - "namespace_name": resourcekeys.K8SKeyNamespaceName, - "pod_name": resourcekeys.K8SKeyPodName, - "container_name": resourcekeys.ContainerKeyName, -} - -var k8sPodMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "cluster_name": resourcekeys.K8SKeyClusterName, - "namespace_name": resourcekeys.K8SKeyNamespaceName, - "pod_name": resourcekeys.K8SKeyPodName, -} - -var k8sNodeMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "cluster_name": resourcekeys.K8SKeyClusterName, - "node_name": resourcekeys.HostKeyName, -} - -var gcpResourceMap = map[string]string{ - "project_id": stackdriverProjectID, - "instance_id": resourcekeys.HostKeyID, - "zone": resourcekeys.CloudKeyZone, -} - -var awsResourceMap = map[string]string{ - "project_id": stackdriverProjectID, - "instance_id": resourcekeys.HostKeyID, - "region": resourcekeys.CloudKeyRegion, - "aws_account": resourcekeys.CloudKeyAccountID, -} - -var appEngineInstanceMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyRegion, - "module_id": appEngineService, - "version_id": appEngineVersion, - "instance_id": appEngineInstance, -} - -// Generic task resource. 
-var genericResourceMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "namespace": stackdriverGenericTaskNamespace, - "job": stackdriverGenericTaskJob, - "task_id": stackdriverGenericTaskID, -} - -var knativeRevisionResourceMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "cluster_name": resourcekeys.K8SKeyClusterName, - knativeServiceName: knativeServiceName, - knativeRevisionName: knativeRevisionName, - knativeConfigurationName: knativeConfigurationName, - knativeNamespaceName: knativeNamespaceName, -} - -var knativeBrokerResourceMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "cluster_name": resourcekeys.K8SKeyClusterName, - knativeNamespaceName: knativeNamespaceName, - knativeBrokerName: knativeBrokerName, -} - -var knativeTriggerResourceMap = map[string]string{ - "project_id": stackdriverProjectID, - "location": resourcekeys.CloudKeyZone, - "cluster_name": resourcekeys.K8SKeyClusterName, - knativeNamespaceName: knativeNamespaceName, - knativeBrokerName: knativeBrokerName, - knativeTriggerName: knativeTriggerName, -} - -// getAutodetectedLabels returns all the labels from the Monitored Resource detected -// from the environment by calling monitoredresource.Autodetect. If a "zone" label is detected, -// a "location" label is added with the same value to account for differences between -// Legacy Stackdriver and Stackdriver Kubernetes Engine Monitoring, -// see https://cloud.google.com/monitoring/kubernetes-engine/migration. -func getAutodetectedLabels() map[string]string { - autodetectOnce.Do(func() { - autodetectedLabels = map[string]string{} - if mr := autodetectFunc(); mr != nil { - _, labels := mr.MonitoredResource() - // accept "zone" value for "location" because values for location can be a zone - // or region, see https://cloud.google.com/docs/geography-and-regions - if _, ok := labels["zone"]; ok { - labels["location"] = labels["zone"] - } - - autodetectedLabels = labels - } - }) - - return autodetectedLabels -} - -// returns transformed label map and true if all labels in match are found -// in input except optional project_id. It returns false if at least one label -// other than project_id is missing. 
-func transformResource(match, input map[string]string) (map[string]string, bool) { - output := make(map[string]string, len(input)) - for dst, src := range match { - if v, ok := input[src]; ok { - output[dst] = v - continue - } - - // attempt to autodetect missing labels, autodetected label keys should - // match destination label keys - if v, ok := getAutodetectedLabels()[dst]; ok { - output[dst] = v - continue - } - - if dst != "project_id" { - return nil, true - } - } - return output, false -} - -// DefaultMapResource implements default resource mapping for well-known resource types -func DefaultMapResource(res *resource.Resource) *monitoredrespb.MonitoredResource { - match := genericResourceMap - result := &monitoredrespb.MonitoredResource{ - Type: "global", - } - if res == nil || res.Labels == nil { - return result - } - - switch { - case res.Type == resourcekeys.ContainerType: - result.Type = "k8s_container" - match = k8sContainerMap - case res.Type == resourcekeys.K8SType: - result.Type = "k8s_pod" - match = k8sPodMap - case res.Type == resourcekeys.HostType && res.Labels[resourcekeys.K8SKeyClusterName] != "": - result.Type = "k8s_node" - match = k8sNodeMap - case res.Type == appEngineInstanceType: - result.Type = appEngineInstanceType - match = appEngineInstanceMap - case res.Labels[resourcekeys.CloudKeyProvider] == resourcekeys.CloudProviderGCP: - result.Type = "gce_instance" - match = gcpResourceMap - case res.Labels[resourcekeys.CloudKeyProvider] == resourcekeys.CloudProviderAWS: - result.Type = "aws_ec2_instance" - match = awsResourceMap - case res.Type == knativeResType: - result.Type = res.Type - match = knativeRevisionResourceMap - case res.Type == knativeBrokerType: - result.Type = knativeBrokerType - match = knativeBrokerResourceMap - case res.Type == knativeTriggerType: - result.Type = knativeTriggerType - match = knativeTriggerResourceMap - } - - var missing bool - result.Labels, missing = transformResource(match, res.Labels) - if missing { - result.Type = "global" - // if project id specified then transform it. - if v, ok := res.Labels[stackdriverProjectID]; ok { - result.Labels = make(map[string]string, 1) - result.Labels["project_id"] = v - } - return result - } - if result.Type == "aws_ec2_instance" { - if v, ok := result.Labels["region"]; ok { - result.Labels["region"] = fmt.Sprintf("aws:%s", v) - } - } - return result -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go deleted file mode 100644 index 184bb1d435..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "strings" - "unicode" -) - -const labelKeySizeLimit = 100 - -// sanitize returns a string that is trunacated to 100 characters if it's too -// long, and replaces non-alphanumeric characters to underscores. 
-func sanitize(s string) string { - if len(s) == 0 { - return s - } - if len(s) > labelKeySizeLimit { - s = s[:labelKeySizeLimit] - } - s = strings.Map(sanitizeRune, s) - if unicode.IsDigit(rune(s[0])) { - s = "key_" + s - } - if s[0] == '_' { - s = "key" + s - } - return s -} - -// converts anything that is not a letter or digit to an underscore -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - // Everything else turns into an underscore - return '_' -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go deleted file mode 100644 index 1f838ba33f..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go +++ /dev/null @@ -1,511 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stackdriver contains the OpenCensus exporters for -// Stackdriver Monitoring and Stackdriver Tracing. -// -// This exporter can be used to send metrics to Stackdriver Monitoring and traces -// to Stackdriver trace. -// -// The package uses Application Default Credentials to authenticate by default. -// See: https://developers.google.com/identity/protocols/application-default-credentials -// -// Alternatively, pass the authentication options in both the MonitoringClientOptions -// and the TraceClientOptions fields of Options. -// -// Stackdriver Monitoring -// -// This exporter support exporting OpenCensus views to Stackdriver Monitoring. -// Each registered view becomes a metric in Stackdriver Monitoring, with the -// tags becoming labels. -// -// The aggregation function determines the metric kind: LastValue aggregations -// generate Gauge metrics and all other aggregations generate Cumulative metrics. -// -// In order to be able to push your stats to Stackdriver Monitoring, you must: -// -// 1. Create a Cloud project: https://support.google.com/cloud/answer/6251787?hl=en -// 2. Enable billing: https://support.google.com/cloud/answer/6288653#new-billing -// 3. Enable the Stackdriver Monitoring API: https://console.cloud.google.com/apis/dashboard -// -// These steps enable the API but don't require that your app is hosted on Google Cloud Platform. -// -// Stackdriver Trace -// -// This exporter supports exporting Trace Spans to Stackdriver Trace. It also -// supports the Google "Cloud Trace" propagation format header. 
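Editor's note: for reference, the label-key sanitizer removed above truncates keys to 100 characters, maps every non-alphanumeric rune to an underscore, and prefixes keys that start with a digit or an underscore. A few worked examples; the helper is copied (lightly condensed) so the snippet runs on its own:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

const labelKeySizeLimit = 100

// Condensed copy of the deleted sanitize helper so the example is self-contained.
func sanitize(s string) string {
	if len(s) == 0 {
		return s
	}
	if len(s) > labelKeySizeLimit {
		s = s[:labelKeySizeLimit]
	}
	s = strings.Map(func(r rune) rune {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			return r
		}
		return '_' // everything else becomes an underscore
	}, s)
	if unicode.IsDigit(rune(s[0])) {
		s = "key_" + s
	}
	if s[0] == '_' {
		s = "key" + s
	}
	return s
}

func main() {
	fmt.Println(sanitize("http.status/code")) // http_status_code
	fmt.Println(sanitize("2xx_responses"))    // key_2xx_responses
	fmt.Println(sanitize("_private"))         // key_private
}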
-package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver" - -import ( - "context" - "errors" - "fmt" - "log" - "os" - "path" - "strings" - "time" - - metadataapi "cloud.google.com/go/compute/metadata" - traceapi "cloud.google.com/go/trace/apiv2" - "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" - opencensus "go.opencensus.io" - "go.opencensus.io/resource" - "go.opencensus.io/resource/resourcekeys" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - "golang.org/x/oauth2/google" - "google.golang.org/api/option" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - - commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - "go.opencensus.io/metric/metricdata" -) - -// Options contains options for configuring the exporter. -type Options struct { - // ProjectID is the identifier of the Stackdriver - // project the user is uploading the stats data to. - // If not set, this will default to your "Application Default Credentials". - // For details see: https://developers.google.com/accounts/docs/application-default-credentials. - // - // It will be used in the project_id label of a Stackdriver monitored - // resource if the resource does not inherently belong to a specific - // project, e.g. on-premise resource like k8s_container or generic_task. - ProjectID string - - // Location is the identifier of the GCP or AWS cloud region/zone in which - // the data for a resource is stored. - // If not set, it will default to the location provided by the metadata server. - // - // It will be used in the location label of a Stackdriver monitored resource - // if the resource does not inherently belong to a specific project, e.g. - // on-premise resource like k8s_container or generic_task. - Location string - - // OnError is the hook to be called when there is - // an error uploading the stats or tracing data. - // If no custom hook is set, errors are logged. - // Optional. - OnError func(err error) - - // MonitoringClientOptions are additional options to be passed - // to the underlying Stackdriver Monitoring API client. - // Optional. - MonitoringClientOptions []option.ClientOption - - // TraceClientOptions are additional options to be passed - // to the underlying Stackdriver Trace API client. - // Optional. - TraceClientOptions []option.ClientOption - - // BundleDelayThreshold determines the max amount of time - // the exporter can wait before uploading view data or trace spans to - // the backend. - // Optional. - BundleDelayThreshold time.Duration - - // BundleCountThreshold determines how many view data events or trace spans - // can be buffered before batch uploading them to the backend. - // Optional. - BundleCountThreshold int - - // TraceSpansBufferMaxBytes is the maximum size (in bytes) of spans that - // will be buffered in memory before being dropped. - // - // If unset, a default of 8MB will be used. - TraceSpansBufferMaxBytes int - - // Resource sets the MonitoredResource against which all views will be - // recorded by this exporter. 
- // - // All Stackdriver metrics created by this exporter are custom metrics, - // so only a limited number of MonitoredResource types are supported, see: - // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource - // - // An important consideration when setting the Resource here is that - // Stackdriver Monitoring only allows a single writer per - // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series - // A TimeSeries is uniquely defined by the metric type name - // (constructed from the view name and the MetricPrefix), the Resource field, - // and the set of label key/value pairs (in OpenCensus terminology: tag). - // - // If no custom Resource is set, a default MonitoredResource - // with type global and no resource labels will be used. If you explicitly - // set this field, you may also want to set custom DefaultMonitoringLabels. - // - // Deprecated: Use MonitoredResource instead. - Resource *monitoredrespb.MonitoredResource - - // MonitoredResource sets the MonitoredResource against which all views will be - // recorded by this exporter. - // - // All Stackdriver metrics created by this exporter are custom metrics, - // so only a limited number of MonitoredResource types are supported, see: - // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource - // - // An important consideration when setting the MonitoredResource here is that - // Stackdriver Monitoring only allows a single writer per - // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series - // A TimeSeries is uniquely defined by the metric type name - // (constructed from the view name and the MetricPrefix), the MonitoredResource field, - // and the set of label key/value pairs (in OpenCensus terminology: tag). - // - // If no custom MonitoredResource is set AND if Resource is also not set then - // a default MonitoredResource with type global and no resource labels will be used. - // If you explicitly set this field, you may also want to set custom DefaultMonitoringLabels. - // - // This field replaces Resource field. If this is set then it will override the - // Resource field. - // Optional, but encouraged. - MonitoredResource monitoredresource.Interface - - // ResourceDetector provides a hook to discover arbitrary resource information. - // - // The translation function provided in MapResource must be able to conver the - // the resource information to a Stackdriver monitored resource. - // - // If this field is unset, resource type and tags will automatically be discovered through - // the OC_RESOURCE_TYPE and OC_RESOURCE_LABELS environment variables. - ResourceDetector resource.Detector - - // MapResource converts a OpenCensus resource to a Stackdriver monitored resource. - // - // If this field is unset, DefaultMapResource will be used which encodes a set of default - // conversions from auto-detected resources to well-known Stackdriver monitored resources. - MapResource func(*resource.Resource) *monitoredrespb.MonitoredResource - - // MetricPrefix overrides the prefix of a Stackdriver metric names. - // Optional. If unset defaults to "custom.googleapis.com/opencensus/". - // If GetMetricPrefix is non-nil, this option is ignored. - MetricPrefix string - - // GetMetricDisplayName allows customizing the display name for the metric - // associated with the given view. 
By default it will be: - // MetricPrefix + view.Name - GetMetricDisplayName func(view *view.View) string - - // GetMetricType allows customizing the metric type for the given view. - // By default, it will be: - // "custom.googleapis.com/opencensus/" + view.Name - // - // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor - // Depreacted. Use GetMetricPrefix instead. - GetMetricType func(view *view.View) string - - // GetMetricPrefix allows customizing the metric prefix for the given metric name. - // If it is not set, MetricPrefix is used. If MetricPrefix is not set, it defaults to: - // "custom.googleapis.com/opencensus/" - // - // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor - GetMetricPrefix func(name string) string - - // DefaultTraceAttributes will be appended to every span that is exported to - // Stackdriver Trace. - DefaultTraceAttributes map[string]interface{} - - // DefaultMonitoringLabels are labels added to every metric created by this - // exporter in Stackdriver Monitoring. - // - // If unset, this defaults to a single label with key "opencensus_task" and - // value "go-@". This default ensures that the set of labels - // together with the default Resource (global) are unique to this - // process, as required by Stackdriver Monitoring. - // - // If you set DefaultMonitoringLabels, make sure that the Resource field - // together with these labels is unique to the - // current process. This is to ensure that there is only a single writer to - // each TimeSeries in Stackdriver. - // - // Set this to &Labels{} (a pointer to an empty Labels) to avoid getting the - // default "opencensus_task" label. You should only do this if you know that - // the Resource you set uniquely identifies this Go process. - DefaultMonitoringLabels *Labels - - // Context allows you to provide a custom context for API calls. - // - // This context will be used several times: first, to create Stackdriver - // trace and metric clients, and then every time a new batch of traces or - // stats needs to be uploaded. - // - // Do not set a timeout on this context. Instead, set the Timeout option. - // - // If unset, context.Background() will be used. - Context context.Context - - // SkipCMD enforces to skip all the CreateMetricDescriptor calls. - // These calls are important in order to configure the unit of the metrics, - // but in some cases all the exported metrics are builtin (unit is configured) - // or the unit is not important. - SkipCMD bool - - // Timeout for all API calls. If not set, defaults to 5 seconds. - Timeout time.Duration - - // ReportingInterval sets the interval between reporting metrics. - // If it is set to zero then default value is used. - ReportingInterval time.Duration - - // NumberOfWorkers sets the number of go rountines that send requests - // to Stackdriver Monitoring and Trace. The minimum number of workers is 1. - NumberOfWorkers int - - // ResourceByDescriptor may be provided to supply monitored resource dynamically - // based on the metric Descriptor. Most users will not need to set this, - // but should instead set ResourceDetector. - // - // The MonitoredResource and ResourceDetector fields are ignored if this - // field is set to a non-nil value. - // - // The ResourceByDescriptor is called to derive monitored resources from - // metric.Descriptor and the label map associated with the time-series. 
- // If any label is used for the derived resource then it will be removed - // from the label map. The remaining labels in the map are returned to - // be used with the time-series. - // - // If the func set to this field does not return valid resource even for one - // time-series then it will result into an error for the entire CreateTimeSeries request - // which may contain more than one time-series. - ResourceByDescriptor func(*metricdata.Descriptor, map[string]string) (map[string]string, monitoredresource.Interface) - - // Override the user agent value supplied to Monitoring APIs and included as an - // attribute in trace data. - UserAgent string -} - -const defaultTimeout = 5 * time.Second - -var defaultDomain = path.Join("custom.googleapis.com", "opencensus") - -var defaultUserAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version) - -// Exporter is a stats and trace exporter that uploads data to Stackdriver. -// -// You can create a single Exporter and register it as both a trace exporter -// (to export to Stackdriver Trace) and a stats exporter (to integrate with -// Stackdriver Monitoring). -type Exporter struct { - traceExporter *traceExporter - statsExporter *statsExporter -} - -// NewExporter creates a new Exporter that implements both stats.Exporter and -// trace.Exporter. -func NewExporter(o Options) (*Exporter, error) { - if o.ProjectID == "" { - ctx := o.Context - if ctx == nil { - ctx = context.Background() - } - creds, err := google.FindDefaultCredentials(ctx, traceapi.DefaultAuthScopes()...) - if err != nil { - return nil, fmt.Errorf("stackdriver: %v", err) - } - if creds.ProjectID == "" { - return nil, errors.New("stackdriver: no project found with application default credentials") - } - o.ProjectID = creds.ProjectID - } - if o.Location == "" { - if metadataapi.OnGCE() { - zone, err := metadataapi.Zone() - if err != nil { - // This error should be logged with a warning level. - err = fmt.Errorf("setting Stackdriver default location failed: %s", err) - if o.OnError != nil { - o.OnError(err) - } else { - log.Print(err) - } - } else { - o.Location = zone - } - } - } - - if o.MonitoredResource != nil { - o.Resource = convertMonitoredResourceToPB(o.MonitoredResource) - } - if o.MapResource == nil { - o.MapResource = DefaultMapResource - } - if o.ResourceDetector != nil { - // For backwards-compatibility we still respect the deprecated resource field. - if o.Resource != nil { - return nil, errors.New("stackdriver: ResourceDetector must not be used in combination with deprecated resource fields") - } - res, err := o.ResourceDetector(o.Context) - if err != nil { - return nil, fmt.Errorf("stackdriver: detect resource: %s", err) - } - // Populate internal resource labels for defaulting project_id, location, and - // generic resource labels of applicable monitored resources. 
- if res.Labels == nil { - res.Labels = make(map[string]string) - } - res.Labels[stackdriverProjectID] = o.ProjectID - res.Labels[resourcekeys.CloudKeyZone] = o.Location - res.Labels[stackdriverGenericTaskNamespace] = "default" - res.Labels[stackdriverGenericTaskJob] = path.Base(os.Args[0]) - res.Labels[stackdriverGenericTaskID] = getTaskValue() - log.Printf("OpenCensus detected resource: %v", res) - - o.Resource = o.MapResource(res) - log.Printf("OpenCensus using monitored resource: %v", o.Resource) - } - if o.MetricPrefix != "" && !strings.HasSuffix(o.MetricPrefix, "/") { - o.MetricPrefix = o.MetricPrefix + "/" - } - if o.UserAgent == "" { - o.UserAgent = defaultUserAgent - } - - se, err := newStatsExporter(o) - if err != nil { - return nil, err - } - te, err := newTraceExporter(o) - if err != nil { - return nil, err - } - return &Exporter{ - statsExporter: se, - traceExporter: te, - }, nil -} - -// ExportView exports to the Stackdriver Monitoring if view data -// has one or more rows. -// Deprecated: use ExportMetrics and StartMetricsExporter instead. -func (e *Exporter) ExportView(vd *view.Data) { - e.statsExporter.ExportView(vd) -} - -// ExportMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring synchronously, -// without de-duping or adding proto metrics to the bundler. -func (e *Exporter) ExportMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) error { - _, err := e.statsExporter.PushMetricsProto(ctx, node, rsc, metrics) - return err -} - -// PushMetricsProto simliar with ExportMetricsProto but returns the number of dropped timeseries. -func (e *Exporter) PushMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) (int, error) { - return e.statsExporter.PushMetricsProto(ctx, node, rsc, metrics) -} - -// ExportMetrics exports OpenCensus Metrics to Stackdriver Monitoring -func (e *Exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { - return e.statsExporter.ExportMetrics(ctx, metrics) -} - -// StartMetricsExporter starts exporter by creating an interval reader that reads metrics -// from all registered producers at set interval and exports them. -// Use StopMetricsExporter to stop exporting metrics. -// Previously, it required registering exporter to export stats collected by opencensus. -// exporter := stackdriver.NewExporter(stackdriver.Option{}) -// view.RegisterExporter(exporter) -// Now, it requires to call StartMetricsExporter() to export stats and metrics collected by opencensus. -// exporter := stackdriver.NewExporter(stackdriver.Option{}) -// exporter.StartMetricsExporter() -// defer exporter.StopMetricsExporter() -// -// Both approach should not be used simultaenously. Otherwise it may result into unknown behavior. -// Previous approach continues to work as before but will not report newly define metrics such -// as gauges. -func (e *Exporter) StartMetricsExporter() error { - return e.statsExporter.startMetricsReader() -} - -// StopMetricsExporter stops exporter from exporting metrics. -func (e *Exporter) StopMetricsExporter() { - e.statsExporter.stopMetricsReader() -} - -// ExportSpan exports a SpanData to Stackdriver Trace. -func (e *Exporter) ExportSpan(sd *trace.SpanData) { - if len(e.traceExporter.o.DefaultTraceAttributes) > 0 { - sd = e.sdWithDefaultTraceAttributes(sd) - } - e.traceExporter.ExportSpan(sd) -} - -// PushTraceSpans exports a bundle of OpenCensus Spans. -// Returns number of dropped spans. 
-func (e *Exporter) PushTraceSpans(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, spans []*trace.SpanData) (int, error) { - return e.traceExporter.pushTraceSpans(ctx, node, rsc, spans) -} - -func (e *Exporter) sdWithDefaultTraceAttributes(sd *trace.SpanData) *trace.SpanData { - newSD := *sd - newSD.Attributes = make(map[string]interface{}) - for k, v := range e.traceExporter.o.DefaultTraceAttributes { - newSD.Attributes[k] = v - } - for k, v := range sd.Attributes { - newSD.Attributes[k] = v - } - return &newSD -} - -// Flush waits for exported data to be uploaded. -// -// This is useful if your program is ending and you do not -// want to lose recent stats or spans. -func (e *Exporter) Flush() { - e.statsExporter.Flush() - e.traceExporter.Flush() -} - -// ViewToMetricDescriptor converts an OpenCensus view to a MetricDescriptor. -// -// This is useful for cases when you want to use your Go code as source of -// truth of metric descriptors. You can extract or define views in a central -// place, then call this method to generate MetricDescriptors. -func (e *Exporter) ViewToMetricDescriptor(ctx context.Context, v *view.View) (*metricpb.MetricDescriptor, error) { - return e.statsExporter.viewToMetricDescriptor(ctx, v) -} - -func (o Options) handleError(err error) { - if o.OnError != nil { - o.OnError(err) - return - } - log.Printf("Failed to export to Stackdriver: %v", err) -} - -func newContextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, func()) { - if ctx == nil { - ctx = context.Background() - } - if timeout <= 0 { - timeout = defaultTimeout - } - return context.WithTimeout(ctx, timeout) -} - -// convertMonitoredResourceToPB converts MonitoredResource data in to -// protocol buffer. -func convertMonitoredResourceToPB(mr monitoredresource.Interface) *monitoredrespb.MonitoredResource { - mrpb := new(monitoredrespb.MonitoredResource) - var labels map[string]string - mrpb.Type, labels = mr.MonitoredResource() - mrpb.Labels = make(map[string]string) - for k, v := range labels { - mrpb.Labels[k] = v - } - return mrpb -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go deleted file mode 100644 index 895945b4eb..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go +++ /dev/null @@ -1,629 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
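// A hedged sketch of the newer metrics path described in the
// StartMetricsExporter comment above, which replaces view.RegisterExporter
// for metric data. ProjectID and ReportingInterval are placeholders.
package main

import (
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/stackdriver"
)

func main() {
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID:         "my-gcp-project", // placeholder
		ReportingInterval: 60 * time.Second, // 0 would fall back to the reader's default
	})
	if err != nil {
		log.Fatalf("stackdriver.NewExporter: %v", err)
	}
	defer exporter.Flush()

	// Reads all registered metric producers on the configured interval and
	// uploads the result; stopped via StopMetricsExporter on shutdown.
	if err := exporter.StartMetricsExporter(); err != nil {
		log.Fatalf("StartMetricsExporter: %v", err)
	}
	defer exporter.StopMetricsExporter()

	// ... instrumented application code runs here ...
}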
- -package stackdriver - -import ( - "context" - "errors" - "fmt" - "os" - "path" - "sort" - "strconv" - "strings" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - monitoring "cloud.google.com/go/monitoring/apiv3" - "github.com/golang/protobuf/ptypes/timestamp" - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricexport" - "google.golang.org/api/option" - "google.golang.org/api/support/bundler" - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - labelpb "google.golang.org/genproto/googleapis/api/label" - "google.golang.org/genproto/googleapis/api/metric" - googlemetricpb "google.golang.org/genproto/googleapis/api/metric" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" -) - -const ( - maxTimeSeriesPerUpload = 200 - opencensusTaskKey = "opencensus_task" - opencensusTaskDescription = "Opencensus task identifier" - defaultDisplayNamePrefix = "OpenCensus" - version = "0.13.3" -) - -// statsExporter exports stats to the Stackdriver Monitoring. -type statsExporter struct { - o Options - - viewDataBundler *bundler.Bundler - metricsBundler *bundler.Bundler - - protoMu sync.Mutex - protoMetricDescriptors map[string]bool // Metric descriptors that were already created remotely - - metricMu sync.Mutex - metricDescriptors map[string]bool // Metric descriptors that were already created remotely - - c *monitoring.MetricClient - defaultLabels map[string]labelValue - ir *metricexport.IntervalReader - - initReaderOnce sync.Once -} - -var ( - errBlankProjectID = errors.New("expecting a non-blank ProjectID") -) - -// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring. -// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent -// invocations of NewExporter with the same ProjectID will return an error. -func newStatsExporter(o Options) (*statsExporter, error) { - if strings.TrimSpace(o.ProjectID) == "" { - return nil, errBlankProjectID - } - - opts := append(o.MonitoringClientOptions, option.WithUserAgent(o.UserAgent)) - ctx := o.Context - if ctx == nil { - ctx = context.Background() - } - client, err := monitoring.NewMetricClient(ctx, opts...) - if err != nil { - return nil, err - } - e := &statsExporter{ - c: client, - o: o, - protoMetricDescriptors: make(map[string]bool), - metricDescriptors: make(map[string]bool), - } - - var defaultLablesNotSanitized map[string]labelValue - if o.DefaultMonitoringLabels != nil { - defaultLablesNotSanitized = o.DefaultMonitoringLabels.m - } else { - defaultLablesNotSanitized = map[string]labelValue{ - opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription}, - } - } - - e.defaultLabels = make(map[string]labelValue) - // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched. - for key, label := range defaultLablesNotSanitized { - e.defaultLabels[sanitize(key)] = label - } - - e.viewDataBundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) { - vds := bundle.([]*view.Data) - e.handleUpload(vds...) 
- }) - e.metricsBundler = bundler.NewBundler((*metricdata.Metric)(nil), func(bundle interface{}) { - metrics := bundle.([]*metricdata.Metric) - e.handleMetricsUpload(metrics) - }) - if delayThreshold := e.o.BundleDelayThreshold; delayThreshold > 0 { - e.viewDataBundler.DelayThreshold = delayThreshold - e.metricsBundler.DelayThreshold = delayThreshold - } - if countThreshold := e.o.BundleCountThreshold; countThreshold > 0 { - e.viewDataBundler.BundleCountThreshold = countThreshold - e.metricsBundler.BundleCountThreshold = countThreshold - } - return e, nil -} - -func (e *statsExporter) startMetricsReader() error { - e.initReaderOnce.Do(func() { - e.ir, _ = metricexport.NewIntervalReader(metricexport.NewReader(), e) - }) - e.ir.ReportingInterval = e.o.ReportingInterval - return e.ir.Start() -} - -func (e *statsExporter) stopMetricsReader() { - if e.ir != nil { - e.ir.Stop() - } -} - -func (e *statsExporter) getMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, *monitoredrespb.MonitoredResource) { - resource := e.o.Resource - if resource == nil { - resource = &monitoredrespb.MonitoredResource{ - Type: "global", - } - } - return tags, resource -} - -// ExportView exports to the Stackdriver Monitoring if view data -// has one or more rows. -func (e *statsExporter) ExportView(vd *view.Data) { - if len(vd.Rows) == 0 { - return - } - err := e.viewDataBundler.Add(vd, 1) - switch err { - case nil: - return - case bundler.ErrOverflow: - e.o.handleError(errors.New("failed to upload: buffer full")) - default: - e.o.handleError(err) - } -} - -// getTaskValue returns a task label value in the format of -// "go-@". -func getTaskValue() string { - hostname, err := os.Hostname() - if err != nil { - hostname = "localhost" - } - return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname -} - -// handleUpload handles uploading a slice -// of Data, as well as error handling. -func (e *statsExporter) handleUpload(vds ...*view.Data) { - if err := e.uploadStats(vds); err != nil { - e.o.handleError(err) - } -} - -// Flush waits for exported view data and metrics to be uploaded. -// -// This is useful if your program is ending and you do not -// want to lose data that hasn't yet been exported. -func (e *statsExporter) Flush() { - e.viewDataBundler.Flush() - e.metricsBundler.Flush() -} - -func (e *statsExporter) uploadStats(vds []*view.Data) error { - ctx, cancel := newContextWithTimeout(e.o.Context, e.o.Timeout) - defer cancel() - ctx, span := trace.StartSpan( - ctx, - "contrib.go.opencensus.io/exporter/stackdriver.uploadStats", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - - for _, vd := range vds { - if err := e.createMetricDescriptorFromView(ctx, vd.View); err != nil { - span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) - return err - } - } - for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { - if err := createTimeSeries(ctx, e.c, req); err != nil { - span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) - // TODO(jbd): Don't fail fast here, batch errors? 
- return err - } - } - return nil -} - -func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest { - var reqs []*monitoringpb.CreateTimeSeriesRequest - - var allTimeSeries []*monitoringpb.TimeSeries - for _, vd := range vds { - for _, row := range vd.Rows { - tags, resource := e.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...)) - ts := &monitoringpb.TimeSeries{ - Metric: &metricpb.Metric{ - Type: e.metricType(vd.View), - Labels: newLabels(e.defaultLabels, tags), - }, - Resource: resource, - Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)}, - } - allTimeSeries = append(allTimeSeries, ts) - } - } - - var timeSeries []*monitoringpb.TimeSeries - for _, ts := range allTimeSeries { - timeSeries = append(timeSeries, ts) - if len(timeSeries) == limit { - ctsreql := e.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries) - reqs = append(reqs, ctsreql...) - timeSeries = timeSeries[:0] - } - } - - if len(timeSeries) > 0 { - ctsreql := e.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries) - reqs = append(reqs, ctsreql...) - } - return reqs -} - -func (e *statsExporter) viewToMetricDescriptor(ctx context.Context, v *view.View) (*metricpb.MetricDescriptor, error) { - m := v.Measure - agg := v.Aggregation - viewName := v.Name - - metricType := e.metricType(v) - var valueType metricpb.MetricDescriptor_ValueType - unit := m.Unit() - // Default metric Kind - metricKind := metricpb.MetricDescriptor_CUMULATIVE - - switch agg.Type { - case view.AggTypeCount: - valueType = metricpb.MetricDescriptor_INT64 - // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", - // because this view does not apply to the recorded values. - unit = stats.UnitDimensionless - case view.AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - valueType = metricpb.MetricDescriptor_INT64 - case *stats.Float64Measure: - valueType = metricpb.MetricDescriptor_DOUBLE - } - case view.AggTypeDistribution: - valueType = metricpb.MetricDescriptor_DISTRIBUTION - case view.AggTypeLastValue: - metricKind = metricpb.MetricDescriptor_GAUGE - switch m.(type) { - case *stats.Int64Measure: - valueType = metricpb.MetricDescriptor_INT64 - case *stats.Float64Measure: - valueType = metricpb.MetricDescriptor_DOUBLE - } - default: - return nil, fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) - } - - var displayName string - if e.o.GetMetricDisplayName == nil { - displayName = e.displayName(viewName) - } else { - displayName = e.o.GetMetricDisplayName(v) - } - - res := &metricpb.MetricDescriptor{ - Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), - DisplayName: displayName, - Description: v.Description, - Unit: unit, - Type: metricType, - MetricKind: metricKind, - ValueType: valueType, - Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys), - } - return res, nil -} - -// createMetricDescriptorFromView creates a MetricDescriptor for the given view data in Stackdriver Monitoring. -// An error will be returned if there is already a metric descriptor created with the same name -// but it has a different aggregation or keys. 
-func (e *statsExporter) createMetricDescriptorFromView(ctx context.Context, v *view.View) error { - // Skip create metric descriptor if configured - if e.o.SkipCMD { - return nil - } - - e.metricMu.Lock() - defer e.metricMu.Unlock() - - viewName := v.Name - - if _, created := e.metricDescriptors[viewName]; created { - return nil - } - - if builtinMetric(e.metricType(v)) { - e.metricDescriptors[viewName] = true - return nil - } - - inMD, err := e.viewToMetricDescriptor(ctx, v) - if err != nil { - return err - } - - if err = e.createMetricDescriptor(ctx, inMD); err != nil { - return err - } - - // Now cache the metric descriptor - e.metricDescriptors[viewName] = true - return nil -} - -func (e *statsExporter) displayName(suffix string) string { - if hasDomain(suffix) { - // If the display name suffix is already prefixed with domain, skip adding extra prefix - return suffix - } - return path.Join(defaultDisplayNamePrefix, suffix) -} - -func (e *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) { - if len(ts) == 0 { - return nil - } - - // Since there are scenarios in which Metrics with the same Type - // can be bunched in the same TimeSeries, we have to ensure that - // we create a unique CreateTimeSeriesRequest with entirely unique Metrics - // per TimeSeries, lest we'll encounter: - // - // err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written: - // Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered. - // Only one point can be written per TimeSeries per request.: timeSeries[2] - // - // This scenario happens when we are using the OpenCensus Agent in which multiple metrics - // are streamed by various client applications. - // See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73 - uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts)) - nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts)) - seenMetrics := make(map[string]struct{}) - - for _, tti := range ts { - key := metricSignature(tti.Metric) - if _, alreadySeen := seenMetrics[key]; !alreadySeen { - uniqueTimeSeries = append(uniqueTimeSeries, tti) - seenMetrics[key] = struct{}{} - } else { - nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti) - } - } - - // UniqueTimeSeries can be bunched up together - // While for each nonUniqueTimeSeries, we have - // to make a unique CreateTimeSeriesRequest. - ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{ - Name: fmt.Sprintf("projects/%s", e.o.ProjectID), - TimeSeries: uniqueTimeSeries, - }) - - // Now recursively also combine the non-unique TimeSeries - // that were singly added to nonUniqueTimeSeries. - // The reason is that we need optimal combinations - // for optimal combinations because: - // * "a/b/c" - // * "a/b/c" - // * "x/y/z" - // * "a/b/c" - // * "x/y/z" - // * "p/y/z" - // * "d/y/z" - // - // should produce: - // CreateTimeSeries(uniqueTimeSeries) :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"] - // CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"] - // CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"] - nonUniqueRequests := e.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries) - ctsreql = append(ctsreql, nonUniqueRequests...) 
- - return ctsreql -} - -// metricSignature creates a unique signature consisting of a -// metric's type and its lexicographically sorted label values -// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/120 -func metricSignature(metric *googlemetricpb.Metric) string { - labels := metric.GetLabels() - labelValues := make([]string, 0, len(labels)) - - for _, labelValue := range labels { - labelValues = append(labelValues, labelValue) - } - sort.Strings(labelValues) - return fmt.Sprintf("%s:%s", metric.GetType(), strings.Join(labelValues, ",")) -} - -func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { - switch v.Aggregation.Type { - case view.AggTypeLastValue: - return newGaugePoint(v, row, end) - default: - return newCumulativePoint(v, row, start, end) - } -} - -func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { - return &monitoringpb.Point{ - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: newTypedValue(v, row), - } -} - -func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point { - gaugeTime := ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - } - return &monitoringpb.Point{ - Interval: &monitoringpb.TimeInterval{ - EndTime: gaugeTime, - }, - Value: newTypedValue(v, row), - } -} - -func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { - switch v := r.Data.(type) { - case *view.CountData: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: v.Value, - }} - case *view.SumData: - switch vd.Measure.(type) { - case *stats.Int64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v.Value), - }} - case *stats.Float64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.Value, - }} - } - case *view.DistributionData: - insertZeroBound := shouldInsertZeroBound(vd.Aggregation.Buckets...) - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ - DistributionValue: &distributionpb.Distribution{ - Count: v.Count, - Mean: v.Mean, - SumOfSquaredDeviation: v.SumOfSquaredDev, - // TODO(songya): uncomment this once Stackdriver supports min/max. 
- // Range: &distributionpb.Distribution_Range{ - // Min: v.Min, - // Max: v.Max, - // }, - BucketOptions: &distributionpb.Distribution_BucketOptions{ - Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ - ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ - Bounds: addZeroBoundOnCondition(insertZeroBound, vd.Aggregation.Buckets...), - }, - }, - }, - BucketCounts: addZeroBucketCountOnCondition(insertZeroBound, v.CountPerBucket...), - }, - }} - case *view.LastValueData: - switch vd.Measure.(type) { - case *stats.Int64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v.Value), - }} - case *stats.Float64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.Value, - }} - } - } - return nil -} - -func shouldInsertZeroBound(bounds ...float64) bool { - if len(bounds) > 0 && bounds[0] > 0.0 { - return true - } - return false -} - -func addZeroBucketCountOnCondition(insert bool, counts ...int64) []int64 { - if insert { - return append([]int64{0}, counts...) - } - return counts -} - -func addZeroBoundOnCondition(insert bool, bounds ...float64) []float64 { - if insert { - return append([]float64{0.0}, bounds...) - } - return bounds -} - -func (e *statsExporter) metricType(v *view.View) string { - if formatter := e.o.GetMetricType; formatter != nil { - return formatter(v) - } - return path.Join("custom.googleapis.com", "opencensus", v.Name) -} - -func newLabels(defaults map[string]labelValue, tags []tag.Tag) map[string]string { - labels := make(map[string]string) - for k, lbl := range defaults { - labels[sanitize(k)] = lbl.val - } - for _, tag := range tags { - labels[sanitize(tag.Key.Name())] = tag.Value - } - return labels -} - -func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labelpb.LabelDescriptor { - labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+len(defaults)) - for key, lbl := range defaults { - labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ - Key: sanitize(key), - Description: lbl.desc, - ValueType: labelpb.LabelDescriptor_STRING, - }) - } - for _, key := range keys { - labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ - Key: sanitize(key.Name()), - ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags - }) - } - return labelDescriptors -} - -func (e *statsExporter) createMetricDescriptor(ctx context.Context, md *metric.MetricDescriptor) error { - ctx, cancel := newContextWithTimeout(ctx, e.o.Timeout) - defer cancel() - cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{ - Name: fmt.Sprintf("projects/%s", e.o.ProjectID), - MetricDescriptor: md, - } - _, err := createMetricDescriptor(ctx, e.c, cmrdesc) - return err -} - -var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return c.CreateMetricDescriptor(ctx, mdr) -} - -var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error { - return c.CreateTimeSeries(ctx, ts) -} - -var knownExternalMetricPrefixes = []string{ - "custom.googleapis.com/", - "external.googleapis.com/", -} - -// builtinMetric returns true if a MetricType is a heuristically known -// built-in Stackdriver metric -func builtinMetric(metricType string) bool { - for _, knownExternalMetric := range knownExternalMetricPrefixes { - if 
strings.HasPrefix(metricType, knownExternalMetric) { - return false - } - } - return true -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go deleted file mode 100644 index a1066138e7..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "fmt" - "log" - "sync" - "time" - - tracingclient "cloud.google.com/go/trace/apiv2" - "github.com/golang/protobuf/proto" - "go.opencensus.io/trace" - "google.golang.org/api/support/bundler" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - - commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" -) - -// traceExporter is an implementation of trace.Exporter that uploads spans to -// Stackdriver. -// -type traceExporter struct { - o Options - projectID string - bundler *bundler.Bundler - // uploadFn defaults to uploadSpans; it can be replaced for tests. - uploadFn func(spans []*tracepb.Span) - overflowLogger - client *tracingclient.Client -} - -var _ trace.Exporter = (*traceExporter)(nil) - -func newTraceExporter(o Options) (*traceExporter, error) { - ctx := o.Context - if ctx == nil { - ctx = context.Background() - } - client, err := tracingclient.NewClient(ctx, o.TraceClientOptions...) - if err != nil { - return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) - } - return newTraceExporterWithClient(o, client), nil -} - -const defaultBufferedByteLimit = 8 * 1024 * 1024 - -func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { - e := &traceExporter{ - projectID: o.ProjectID, - client: c, - o: o, - } - b := bundler.NewBundler((*tracepb.Span)(nil), func(bundle interface{}) { - e.uploadFn(bundle.([]*tracepb.Span)) - }) - if o.BundleDelayThreshold > 0 { - b.DelayThreshold = o.BundleDelayThreshold - } else { - b.DelayThreshold = 2 * time.Second - } - if o.BundleCountThreshold > 0 { - b.BundleCountThreshold = o.BundleCountThreshold - } else { - b.BundleCountThreshold = 50 - } - if o.NumberOfWorkers > 0 { - b.HandlerLimit = o.NumberOfWorkers - } - // The measured "bytes" are not really bytes, see exportReceiver. - b.BundleByteThreshold = b.BundleCountThreshold * 200 - b.BundleByteLimit = b.BundleCountThreshold * 1000 - if o.TraceSpansBufferMaxBytes > 0 { - b.BufferedByteLimit = o.TraceSpansBufferMaxBytes - } else { - b.BufferedByteLimit = defaultBufferedByteLimit - } - - e.bundler = b - e.uploadFn = e.uploadSpans - return e -} - -// ExportSpan exports a SpanData to Stackdriver Trace. 
-func (e *traceExporter) ExportSpan(s *trace.SpanData) { - protoSpan := protoFromSpanData(s, e.projectID, e.o.Resource, e.o.UserAgent) - protoSize := proto.Size(protoSpan) - err := e.bundler.Add(protoSpan, protoSize) - switch err { - case nil: - return - case bundler.ErrOversizedItem: - case bundler.ErrOverflow: - e.overflowLogger.log() - default: - e.o.handleError(err) - } -} - -// Flush waits for exported trace spans to be uploaded. -// -// This is useful if your program is ending and you do not want to lose recent -// spans. -func (e *traceExporter) Flush() { - e.bundler.Flush() -} - -func (e *traceExporter) pushTraceSpans(ctx context.Context, node *commonpb.Node, r *resourcepb.Resource, spans []*trace.SpanData) (int, error) { - ctx, span := trace.StartSpan( - ctx, - "contrib.go.opencensus.io/exporter/stackdriver.PushTraceSpans", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) - - protoSpans := make([]*tracepb.Span, 0, len(spans)) - - res := e.o.Resource - if r != nil { - res = e.o.MapResource(resourcepbToResource(r)) - } - - for _, span := range spans { - protoSpans = append(protoSpans, protoFromSpanData(span, e.projectID, res, e.o.UserAgent)) - } - - req := tracepb.BatchWriteSpansRequest{ - Name: "projects/" + e.projectID, - Spans: protoSpans, - } - // Create a never-sampled span to prevent traces associated with exporter. - ctx, cancel := newContextWithTimeout(ctx, e.o.Timeout) - defer cancel() - - err := e.client.BatchWriteSpans(ctx, &req) - - if err != nil { - return len(spans), err - } - return 0, nil -} - -// uploadSpans uploads a set of spans to Stackdriver. -func (e *traceExporter) uploadSpans(spans []*tracepb.Span) { - req := tracepb.BatchWriteSpansRequest{ - Name: "projects/" + e.projectID, - Spans: spans, - } - // Create a never-sampled span to prevent traces associated with exporter. - ctx, cancel := newContextWithTimeout(e.o.Context, e.o.Timeout) - defer cancel() - ctx, span := trace.StartSpan( - ctx, - "contrib.go.opencensus.io/exporter/stackdriver.uploadSpans", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) - - err := e.client.BatchWriteSpans(ctx, &req) - if err != nil { - span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) - e.o.handleError(err) - } -} - -// overflowLogger ensures that at most one overflow error log message is -// written every 5 seconds. 
-type overflowLogger struct { - mu sync.Mutex - pause bool - accum int -} - -func (o *overflowLogger) delay() { - o.pause = true - time.AfterFunc(5*time.Second, func() { - o.mu.Lock() - defer o.mu.Unlock() - switch { - case o.accum == 0: - o.pause = false - case o.accum == 1: - log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") - o.accum = 0 - o.delay() - default: - log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) - o.accum = 0 - o.delay() - } - }) -} - -func (o *overflowLogger) log() { - o.mu.Lock() - defer o.mu.Unlock() - if !o.pause { - log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") - o.delay() - } else { - o.accum++ - } -} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go deleted file mode 100644 index b44957fe4b..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "fmt" - "math" - "strconv" - "time" - "unicode/utf8" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/trace" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -const ( - maxAnnotationEventsPerSpan = 32 - maxMessageEventsPerSpan = 128 - maxAttributeStringValue = 256 - agentLabel = "g.co/agent" - - labelHTTPHost = `/http/host` - labelHTTPMethod = `/http/method` - labelHTTPStatusCode = `/http/status_code` - labelHTTPPath = `/http/path` - labelHTTPUserAgent = `/http/user_agent` -) - -// proto returns a protocol buffer representation of a SpanData. -func protoFromSpanData(s *trace.SpanData, projectID string, mr *monitoredrespb.MonitoredResource, userAgent string) *tracepb.Span { - if s == nil { - return nil - } - - traceIDString := s.SpanContext.TraceID.String() - spanIDString := s.SpanContext.SpanID.String() - - name := s.Name - switch s.SpanKind { - case trace.SpanKindClient: - name = "Sent." + name - case trace.SpanKindServer: - name = "Recv." 
+ name - } - - sp := &tracepb.Span{ - Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, - SpanId: spanIDString, - DisplayName: trunc(name, 128), - StartTime: timestampProto(s.StartTime), - EndTime: timestampProto(s.EndTime), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, - } - if p := s.ParentSpanID; p != (trace.SpanID{}) { - sp.ParentSpanId = p.String() - } - if s.Status.Code != 0 || s.Status.Message != "" { - sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} - } - - var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int - copyAttributes(&sp.Attributes, s.Attributes) - - // Copy MonitoredResources as span Attributes - sp.Attributes = copyMonitoredResourceAttributes(sp.Attributes, mr) - - as := s.Annotations - for i, a := range as { - if annotations >= maxAnnotationEventsPerSpan { - droppedAnnotationsCount = len(as) - i - break - } - annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} - copyAttributes(&annotation.Attributes, a.Attributes) - event := &tracepb.Span_TimeEvent{ - Time: timestampProto(a.Time), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, - } - annotations++ - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) - } - - if sp.Attributes == nil { - sp.Attributes = &tracepb.Span_Attributes{ - AttributeMap: make(map[string]*tracepb.AttributeValue), - } - } - - // Only set the agent label if it is not already set. That enables the - // OpenCensus agent/collector to set the agent label based on the library that - // sent the span to the agent. - // - // We now provide a config option to set the userAgent explicitly, which is - // used both here and in request headers when sending metric data, but have - // retained this non-override functionality for backwards compatibility. 
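// For illustration (values assumed, not taken from a real export): when the
// application did not set g.co/agent itself, the exporter fills it with its
// user agent string (the configured UserAgent or, by default, something like
// "opencensus-go <version>; stackdriver-exporter 0.13.3"), truncated to the
// 256-character attribute limit like any other string value.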
- if _, hasAgent := sp.Attributes.AttributeMap[agentLabel]; !hasAgent { - sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: trunc(userAgent, maxAttributeStringValue), - }, - } - } - - es := s.MessageEvents - for i, e := range es { - if messageEvents >= maxMessageEventsPerSpan { - droppedMessageEventsCount = len(es) - i - break - } - messageEvents++ - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ - Time: timestampProto(e.Time), - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), - Id: e.MessageID, - UncompressedSizeBytes: e.UncompressedByteSize, - CompressedSizeBytes: e.CompressedByteSize, - }, - }, - }) - } - - if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 { - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) - sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) - } - - if len(s.Links) > 0 { - sp.Links = &tracepb.Span_Links{} - sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links)) - for _, l := range s.Links { - link := &tracepb.Span_Link{ - TraceId: l.TraceID.String(), - SpanId: l.SpanID.String(), - Type: tracepb.Span_Link_Type(l.Type), - } - copyAttributes(&link.Attributes, l.Attributes) - sp.Links.Link = append(sp.Links.Link, link) - } - } - return sp -} - -// timestampProto creates a timestamp proto for a time.Time. -func timestampProto(t time.Time) *timestamppb.Timestamp { - return ×tamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } -} - -// copyMonitoredResourceAttributes copies proto monitoredResource to proto map field (Span_Attributes) -// it creates the map if it is nil. -func copyMonitoredResourceAttributes(out *tracepb.Span_Attributes, mr *monitoredrespb.MonitoredResource) *tracepb.Span_Attributes { - if mr == nil { - return out - } - if out == nil { - out = &tracepb.Span_Attributes{} - } - if out.AttributeMap == nil { - out.AttributeMap = make(map[string]*tracepb.AttributeValue) - } - for k, v := range mr.Labels { - av := attributeValue(v) - out.AttributeMap[fmt.Sprintf("g.co/r/%s/%s", mr.Type, k)] = av - } - return out -} - -// copyAttributes copies a map of attributes to a proto map field. -// It creates the map if it is nil. 
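// For example (based on the switch in the function below): a span attribute
// recorded under ochttp.PathAttribute is exported under the Stackdriver key
// "/http/path", the other well-known ochttp attributes map to the /http/*
// labels declared above, and any remaining key longer than 128 characters is
// counted as dropped rather than exported.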
-func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { - if len(in) == 0 { - return - } - if *out == nil { - *out = &tracepb.Span_Attributes{} - } - if (*out).AttributeMap == nil { - (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) - } - var dropped int32 - for key, value := range in { - av := attributeValue(value) - if av == nil { - continue - } - switch key { - case ochttp.PathAttribute: - (*out).AttributeMap[labelHTTPPath] = av - case ochttp.HostAttribute: - (*out).AttributeMap[labelHTTPHost] = av - case ochttp.MethodAttribute: - (*out).AttributeMap[labelHTTPMethod] = av - case ochttp.UserAgentAttribute: - (*out).AttributeMap[labelHTTPUserAgent] = av - case ochttp.StatusCodeAttribute: - (*out).AttributeMap[labelHTTPStatusCode] = av - default: - if len(key) > 128 { - dropped++ - continue - } - (*out).AttributeMap[key] = av - } - } - (*out).DroppedAttributesCount = dropped -} - -func attributeValue(v interface{}) *tracepb.AttributeValue { - switch value := v.(type) { - case bool: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, - } - case int64: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_IntValue{IntValue: value}, - } - case float64: - // TODO: set double value if Stackdriver Trace support it in the future. - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: trunc(strconv.FormatFloat(value, 'f', -1, 64), - maxAttributeStringValue)}, - } - case string: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, - } - } - return nil -} - -// trunc returns a TruncatableString truncated to the given limit. -func trunc(s string, limit int) *tracepb.TruncatableString { - if len(s) > limit { - b := []byte(s[:limit]) - for { - r, size := utf8.DecodeLastRune(b) - if r == utf8.RuneError && size == 1 { - b = b[:len(b)-1] - } else { - break - } - } - return &tracepb.TruncatableString{ - Value: string(b), - TruncatedByteCount: clip32(len(s) - len(b)), - } - } - return &tracepb.TruncatableString{ - Value: s, - TruncatedByteCount: 0, - } -} - -// clip32 clips an int to the range of an int32. -func clip32(x int) int32 { - if x < math.MinInt32 { - return math.MinInt32 - } - if x > math.MaxInt32 { - return math.MaxInt32 - } - return int32(x) -} diff --git a/vendor/github.com/antlr/antlr4/LICENSE.txt b/vendor/github.com/antlr/antlr4/LICENSE.txt deleted file mode 100644 index 2042d1bda6..0000000000 --- a/vendor/github.com/antlr/antlr4/LICENSE.txt +++ /dev/null @@ -1,52 +0,0 @@ -[The "BSD 3-clause license"] -Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -===== - -MIT License for codepointat.js from https://git.io/codepointat -MIT License for fromcodepoint.js from https://git.io/vDW1m - -Copyright Mathias Bynens - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go deleted file mode 100644 index 1592212e14..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNInvalidAltNumber int - -type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Used to build DFA predictors for them. - DecisionToState []DecisionState - - // grammarType is the ATN type and is used for deserializing ATNs from strings. - grammarType int - - // lexerActions is referenced by action transitions in the ATN for lexer ATNs. - lexerActions []LexerAction - - // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. - maxTokenType int - - modeNameToStartState map[string]*TokensStartState - - modeToStartState []*TokensStartState - - // ruleToStartState maps from rule index to starting state number. - ruleToStartState []*RuleStartState - - // ruleToStopState maps from rule index to stop state number. - ruleToStopState []*RuleStopState - - // ruleToTokenType maps the rule index to the resulting token type for lexer - // ATNs. For parser ATNs, it maps the rule index to the generated bypass token - // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was - // specified, and otherwise is nil. 
- ruleToTokenType []int - - states []ATNState -} - -func NewATN(grammarType int, maxTokenType int) *ATN { - return &ATN{ - grammarType: grammarType, - maxTokenType: maxTokenType, - modeNameToStartState: make(map[string]*TokensStartState), - } -} - -// NextTokensInContext computes the set of valid tokens that can occur starting -// in state s. If ctx is nil, the set of tokens will not include what can follow -// the rule surrounding s. In other words, the set will be restricted to tokens -// reachable staying within the rule of s. -func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { - return NewLL1Analyzer(a).Look(s, nil, ctx) -} - -// NextTokensNoContext computes the set of valid tokens that can occur starting -// in s and staying in same rule. Token.EPSILON is in set if we reach end of -// rule. -func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { - if s.GetNextTokenWithinRule() != nil { - return s.GetNextTokenWithinRule() - } - - s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil)) - s.GetNextTokenWithinRule().readOnly = true - - return s.GetNextTokenWithinRule() -} - -func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { - if ctx == nil { - return a.NextTokensNoContext(s) - } - - return a.NextTokensInContext(s, ctx) -} - -func (a *ATN) addState(state ATNState) { - if state != nil { - state.SetATN(a) - state.SetStateNumber(len(a.states)) - } - - a.states = append(a.states, state) -} - -func (a *ATN) removeState(state ATNState) { - a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice -} - -func (a *ATN) defineDecisionState(s DecisionState) int { - a.DecisionToState = append(a.DecisionToState, s) - s.setDecision(len(a.DecisionToState) - 1) - - return s.getDecision() -} - -func (a *ATN) getDecisionState(decision int) DecisionState { - if len(a.DecisionToState) == 0 { - return nil - } - - return a.DecisionToState[decision] -} - -// getExpectedTokens computes the set of input symbols which could follow ATN -// state number stateNumber in the specified full parse context ctx and returns -// the set of potentially valid input symbols which could follow the specified -// state in the specified context. This method considers the complete parser -// context, but does not evaluate semantic predicates (i.e. all predicates -// encountered during the calculation are assumed true). If a path in the ATN -// exists from the starting state to the RuleStopState of the outermost context -// without Matching any symbols, Token.EOF is added to the returned set. -// -// A nil ctx defaults to ParserRuleContext.EMPTY. -// -// It panics if the ATN does not contain state stateNumber. 
-func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { - if stateNumber < 0 || stateNumber >= len(a.states) { - panic("Invalid state number.") - } - - s := a.states[stateNumber] - following := a.NextTokens(s, nil) - - if !following.contains(TokenEpsilon) { - return following - } - - expected := NewIntervalSet() - - expected.addSet(following) - expected.removeOne(TokenEpsilon) - - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := a.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - - following = a.NextTokens(rt.(*RuleTransition).followState, nil) - expected.addSet(following) - expected.removeOne(TokenEpsilon) - ctx = ctx.GetParent().(RuleContext) - } - - if following.contains(TokenEpsilon) { - expected.addOne(TokenEOF) - } - - return expected -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go deleted file mode 100644 index 0535d5246c..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type comparable interface { - equals(other interface{}) bool -} - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. -type ATNConfig interface { - comparable - - hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? 
- } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
-func (b *BaseATNConfig) equals(o interface{}) bool { - if b == o { - return true - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -func (b *BaseATNConfig) hash() int { - var c int - if b.context != nil { - c = b.context.hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func (l *LexerATNConfig) hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.hash()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.hash()) - h = murmurUpdate(h, l.semanticContext.hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.hash()) - h = murmurFinish(h, 6) - return h -} - -func (l 
*LexerATNConfig) equals(other interface{}) bool { - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.equals(othert.BaseATNConfig) -} - - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go deleted file mode 100644 index d9f74755fa..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -type ATNConfigSet interface { - hash() int - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *Set - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Equals(other interface{}) bool - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *Set - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. 
Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewSet(nil, equalATNConfigs), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing := b.configLookup.add(config).(ATNConfig) - - if existing == config { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *Set { - states := NewSet(nil, nil) - - for i := 0; i < len(b.configs); i++ { - states.add(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.length() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -func (b *BaseATNConfigSet) Equals(other interface{}) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary? 
- b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := murmurInit(1) - for _, c := range b.configs { - if c != nil { - h = murmurUpdate(h, c.hash()) - } - } - return murmurFinish(h, len(b.configs)) -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewSet(nil, equalATNConfigs) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - b.configLookup = NewSet(nil, nil) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber() - alts := ai.GetAlt() == bi.GetAlt() - cons := ai.GetSemanticContext().equals(bi.GetSemanticContext()) - - return nums && alts && cons -} diff --git 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go deleted file mode 100644 index 18b89efafb..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false} - -type ATNDeserializationOptions struct { - readOnly bool - verifyATN bool - generateRuleBypassTransitions bool -} - -func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions { - o := new(ATNDeserializationOptions) - - if CopyFrom != nil { - o.readOnly = CopyFrom.readOnly - o.verifyATN = CopyFrom.verifyATN - o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions - } - - return o -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go deleted file mode 100644 index 884d39cf7c..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "encoding/hex" - "fmt" - "strconv" - "strings" - "unicode/utf16" -) - -// This is the earliest supported serialized UUID. -// stick to serialized version for now, we don't need a UUID instance -var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E" -var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089" - -// This list contains all of the currently supported UUIDs, ordered by when -// the feature first appeared in this branch. -var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP} - -var SerializedVersion = 3 - -// This is the current serialized UUID. -var SerializedUUID = AddedUnicodeSMP - -type LoopEndStateIntPair struct { - item0 *LoopEndState - item1 int -} - -type BlockStartStateIntPair struct { - item0 BlockStartState - item1 int -} - -type ATNDeserializer struct { - deserializationOptions *ATNDeserializationOptions - data []rune - pos int - uuid string -} - -func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { - if options == nil { - options = ATNDeserializationOptionsdefaultOptions - } - - return &ATNDeserializer{deserializationOptions: options} -} - -func stringInSlice(a string, list []string) int { - for i, b := range list { - if b == a { - return i - } - } - - return -1 -} - -// isFeatureSupported determines if a particular serialized representation of an -// ATN supports a particular feature, identified by the UUID used for -// serializing the ATN at the time the feature was first introduced. Feature is -// the UUID marking the first time the feature was supported in the serialized -// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently -// being deserialized. It returns true if actualUuid represents a serialized ATN -// at or after the feature identified by feature was introduced, and otherwise -// false. 
-func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool { - idx1 := stringInSlice(feature, SupportedUUIDs) - - if idx1 < 0 { - return false - } - - idx2 := stringInSlice(actualUUID, SupportedUUIDs) - - return idx2 >= idx1 -} - -func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN { - a.reset(utf16.Decode(data)) - a.checkVersion() - a.checkUUID() - - atn := a.readATN() - - a.readStates(atn) - a.readRules(atn) - a.readModes(atn) - - sets := make([]*IntervalSet, 0) - - // First, deserialize sets with 16-bit arguments <= U+FFFF. - sets = a.readSets(atn, sets, a.readInt) - // Next, if the ATN was serialized with the Unicode SMP feature, - // deserialize sets with 32-bit arguments <= U+10FFFF. - if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) { - sets = a.readSets(atn, sets, a.readInt32) - } - - a.readEdges(atn, sets) - a.readDecisions(atn) - a.readLexerActions(atn) - a.markPrecedenceDecisions(atn) - a.verifyATN(atn) - - if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser { - a.generateRuleBypassTransitions(atn) - // Re-verify after modification - a.verifyATN(atn) - } - - return atn - -} - -func (a *ATNDeserializer) reset(data []rune) { - temp := make([]rune, len(data)) - - for i, c := range data { - // Don't adjust the first value since that's the version number - if i == 0 { - temp[i] = c - } else if c > 1 { - temp[i] = c - 2 - } else { - temp[i] = c + 65533 - } - } - - a.data = temp - a.pos = 0 -} - -func (a *ATNDeserializer) checkVersion() { - version := a.readInt() - - if version != SerializedVersion { - panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").") - } -} - -func (a *ATNDeserializer) checkUUID() { - uuid := a.readUUID() - - if stringInSlice(uuid, SupportedUUIDs) < 0 { - panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).") - } - - a.uuid = uuid -} - -func (a *ATNDeserializer) readATN() *ATN { - grammarType := a.readInt() - maxTokenType := a.readInt() - - return NewATN(grammarType, maxTokenType) -} - -func (a *ATNDeserializer) readStates(atn *ATN) { - loopBackStateNumbers := make([]LoopEndStateIntPair, 0) - endStateNumbers := make([]BlockStartStateIntPair, 0) - - nstates := a.readInt() - - for i := 0; i < nstates; i++ { - stype := a.readInt() - - // Ignore bad types of states - if stype == ATNStateInvalidType { - atn.addState(nil) - - continue - } - - ruleIndex := a.readInt() - - if ruleIndex == 0xFFFF { - ruleIndex = -1 - } - - s := a.stateFactory(stype, ruleIndex) - - if stype == ATNStateLoopEnd { - loopBackStateNumber := a.readInt() - - loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) - } else if s2, ok := s.(BlockStartState); ok { - endStateNumber := a.readInt() - - endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber}) - } - - atn.addState(s) - } - - // Delay the assignment of loop back and end states until we know all the state - // instances have been initialized - for j := 0; j < len(loopBackStateNumbers); j++ { - pair := loopBackStateNumbers[j] - - pair.item0.loopBackState = atn.states[pair.item1] - } - - for j := 0; j < len(endStateNumbers); j++ { - pair := endStateNumbers[j] - - pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) - } - - numNonGreedyStates := a.readInt() - - for j := 0; j < numNonGreedyStates; j++ { - stateNumber := a.readInt() - - 
atn.states[stateNumber].(DecisionState).setNonGreedy(true) - } - - numPrecedenceStates := a.readInt() - - for j := 0; j < numPrecedenceStates; j++ { - stateNumber := a.readInt() - - atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true - } -} - -func (a *ATNDeserializer) readRules(atn *ATN) { - nrules := a.readInt() - - if atn.grammarType == ATNTypeLexer { - atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0) - } - - atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0) - - for i := 0; i < nrules; i++ { - s := a.readInt() - startState := atn.states[s].(*RuleStartState) - - atn.ruleToStartState[i] = startState - - if atn.grammarType == ATNTypeLexer { - tokenType := a.readInt() - - if tokenType == 0xFFFF { - tokenType = TokenEOF - } - - atn.ruleToTokenType[i] = tokenType - } - } - - atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0) - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if s2, ok := state.(*RuleStopState); ok { - atn.ruleToStopState[s2.ruleIndex] = s2 - atn.ruleToStartState[s2.ruleIndex].stopState = s2 - } - } -} - -func (a *ATNDeserializer) readModes(atn *ATN) { - nmodes := a.readInt() - - for i := 0; i < nmodes; i++ { - s := a.readInt() - - atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState)) - } -} - -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet { - m := a.readInt() - - for i := 0; i < m; i++ { - iset := NewIntervalSet() - - sets = append(sets, iset) - - n := a.readInt() - containsEOF := a.readInt() - - if containsEOF != 0 { - iset.addOne(-1) - } - - for j := 0; j < n; j++ { - i1 := readUnicode() - i2 := readUnicode() - - iset.addRange(i1, i2) - } - } - - return sets -} - -func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { - nedges := a.readInt() - - for i := 0; i < nedges; i++ { - var ( - src = a.readInt() - trg = a.readInt() - ttype = a.readInt() - arg1 = a.readInt() - arg2 = a.readInt() - arg3 = a.readInt() - trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) - srcState = atn.states[src] - ) - - srcState.AddTransition(trans, -1) - } - - // Edges for rule stop states can be derived, so they are not serialized - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - var t, ok = state.GetTransitions()[j].(*RuleTransition) - - if !ok { - continue - } - - outermostPrecedenceReturn := -1 - - if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule { - if t.precedence == 0 { - outermostPrecedenceReturn = t.getTarget().GetRuleIndex() - } - } - - trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn) - - atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1) - } - } - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if s2, ok := state.(*BaseBlockStartState); ok { - // We need to know the end state to set its start state - if s2.endState == nil { - panic("IllegalState") - } - - // Block end states can only be associated to a single block start state - if s2.endState.startState != nil { - panic("IllegalState") - } - - s2.endState.startState = state - } - - if s2, ok := state.(*PlusLoopbackState); ok { - for j := 0; j < len(s2.GetTransitions()); j++ { - target := s2.GetTransitions()[j].getTarget() - - if t2, ok := target.(*PlusBlockStartState); ok { - t2.loopBackState = state - } - } - } else if s2, ok := 
state.(*StarLoopbackState); ok { - for j := 0; j < len(s2.GetTransitions()); j++ { - target := s2.GetTransitions()[j].getTarget() - - if t2, ok := target.(*StarLoopEntryState); ok { - t2.loopBackState = state - } - } - } - } -} - -func (a *ATNDeserializer) readDecisions(atn *ATN) { - ndecisions := a.readInt() - - for i := 0; i < ndecisions; i++ { - s := a.readInt() - decState := atn.states[s].(DecisionState) - - atn.DecisionToState = append(atn.DecisionToState, decState) - decState.setDecision(i) - } -} - -func (a *ATNDeserializer) readLexerActions(atn *ATN) { - if atn.grammarType == ATNTypeLexer { - count := a.readInt() - - atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil) - - for i := 0; i < count; i++ { - actionType := a.readInt() - data1 := a.readInt() - - if data1 == 0xFFFF { - data1 = -1 - } - - data2 := a.readInt() - - if data2 == 0xFFFF { - data2 = -1 - } - - lexerAction := a.lexerActionFactory(actionType, data1, data2) - - atn.lexerActions[i] = lexerAction - } - } -} - -func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { - count := len(atn.ruleToStartState) - - for i := 0; i < count; i++ { - atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 - } - - for i := 0; i < count; i++ { - a.generateRuleBypassTransition(atn, i) - } -} - -func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { - bypassStart := NewBasicBlockStartState() - - bypassStart.ruleIndex = idx - atn.addState(bypassStart) - - bypassStop := NewBlockEndState() - - bypassStop.ruleIndex = idx - atn.addState(bypassStop) - - bypassStart.endState = bypassStop - - atn.defineDecisionState(bypassStart.BaseDecisionState) - - bypassStop.startState = bypassStart - - var excludeTransition Transition - var endState ATNState - - if atn.ruleToStartState[idx].isPrecedenceRule { - // Wrap from the beginning of the rule to the StarLoopEntryState - endState = nil - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if a.stateIsEndStateFor(state, idx) != nil { - endState = state - excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] - - break - } - } - - if excludeTransition == nil { - panic("Couldn't identify final state of the precedence rule prefix section.") - } - } else { - endState = atn.ruleToStopState[idx] - } - - // All non-excluded transitions that currently target end state need to target - // blockEnd instead - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - transition := state.GetTransitions()[j] - - if transition == excludeTransition { - continue - } - - if transition.getTarget() == endState { - transition.setTarget(bypassStop) - } - } - } - - // All transitions leaving the rule start state need to leave blockStart instead - ruleToStartState := atn.ruleToStartState[idx] - count := len(ruleToStartState.GetTransitions()) - - for count > 0 { - bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) - ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) - } - - // Link the new states - atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) - bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) - - MatchState := NewBasicState() - - atn.addState(MatchState) - MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) - bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) -} - -func (a 
*ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { - if state.GetRuleIndex() != idx { - return nil - } - - if _, ok := state.(*StarLoopEntryState); !ok { - return nil - } - - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if _, ok := maybeLoopEndState.(*LoopEndState); !ok { - return nil - } - - var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { - return state - } - - return nil -} - -// markPrecedenceDecisions analyzes the StarLoopEntryState states in the -// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to -// the correct value. -func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { - for _, state := range atn.states { - if _, ok := state.(*StarLoopEntryState); !ok { - continue - } - - // We analyze the ATN to determine if a ATN decision state is the - // decision for the closure block that determines whether a - // precedence rule should continue or complete. - if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if s3, ok := maybeLoopEndState.(*LoopEndState); ok { - var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if s3.epsilonOnlyTransitions && ok2 { - state.(*StarLoopEntryState).precedenceRuleDecision = true - } - } - } - } -} - -func (a *ATNDeserializer) verifyATN(atn *ATN) { - if !a.deserializationOptions.verifyATN { - return - } - - // Verify assumptions - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if state == nil { - continue - } - - a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") - - switch s2 := state.(type) { - case *PlusBlockStartState: - a.checkCondition(s2.loopBackState != nil, "") - - case *StarLoopEntryState: - a.checkCondition(s2.loopBackState != nil, "") - a.checkCondition(len(s2.GetTransitions()) == 2, "") - - switch s2 := state.(type) { - case *StarBlockStartState: - var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState) - - a.checkCondition(ok2, "") - a.checkCondition(!s2.nonGreedy, "") - - case *LoopEndState: - var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState) - - a.checkCondition(ok2, "") - a.checkCondition(s3.nonGreedy, "") - - default: - panic("IllegalState") - } - - case *StarLoopbackState: - a.checkCondition(len(state.GetTransitions()) == 1, "") - - var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) - - a.checkCondition(ok2, "") - - case *LoopEndState: - a.checkCondition(s2.loopBackState != nil, "") - - case *RuleStartState: - a.checkCondition(s2.stopState != nil, "") - - case *BaseBlockStartState: - a.checkCondition(s2.endState != nil, "") - - case *BlockEndState: - a.checkCondition(s2.startState != nil, "") - - case DecisionState: - a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") - - default: - var _, ok = s2.(*RuleStopState) - - a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") - } - } -} - -func (a *ATNDeserializer) checkCondition(condition bool, message string) { - if !condition { - if message == "" { - message = "IllegalState" - } - - panic(message) - } -} - -func (a *ATNDeserializer) readInt() int { - v := a.data[a.pos] - - a.pos++ - - return int(v) -} - -func (a *ATNDeserializer) readInt32() int { - var low = a.readInt() - var high = a.readInt() - return low | 
(high << 16) -} - -//TODO -//func (a *ATNDeserializer) readLong() int64 { -// panic("Not implemented") -// var low = a.readInt32() -// var high = a.readInt32() -// return (low & 0x00000000FFFFFFFF) | (high << int32) -//} - -func createByteToHex() []string { - bth := make([]string, 256) - - for i := 0; i < 256; i++ { - bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)})) - } - - return bth -} - -var byteToHex = createByteToHex() - -func (a *ATNDeserializer) readUUID() string { - bb := make([]int, 16) - - for i := 7; i >= 0; i-- { - integer := a.readInt() - - bb[(2*i)+1] = integer & 0xFF - bb[2*i] = (integer >> 8) & 0xFF - } - - return byteToHex[bb[0]] + byteToHex[bb[1]] + - byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + - byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + - byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + - byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + - byteToHex[bb[10]] + byteToHex[bb[11]] + - byteToHex[bb[12]] + byteToHex[bb[13]] + - byteToHex[bb[14]] + byteToHex[bb[15]] -} - -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { - target := atn.states[trg] - - switch typeIndex { - case TransitionEPSILON: - return NewEpsilonTransition(target, -1) - - case TransitionRANGE: - if arg3 != 0 { - return NewRangeTransition(target, TokenEOF, arg2) - } - - return NewRangeTransition(target, arg1, arg2) - - case TransitionRULE: - return NewRuleTransition(atn.states[arg1], arg2, arg3, target) - - case TransitionPREDICATE: - return NewPredicateTransition(target, arg1, arg2, arg3 != 0) - - case TransitionPRECEDENCE: - return NewPrecedencePredicateTransition(target, arg1) - - case TransitionATOM: - if arg3 != 0 { - return NewAtomTransition(target, TokenEOF) - } - - return NewAtomTransition(target, arg1) - - case TransitionACTION: - return NewActionTransition(target, arg1, arg2, arg3 != 0) - - case TransitionSET: - return NewSetTransition(target, sets[arg1]) - - case TransitionNOTSET: - return NewNotSetTransition(target, sets[arg1]) - - case TransitionWILDCARD: - return NewWildcardTransition(target) - } - - panic("The specified transition type is not valid.") -} - -func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { - var s ATNState - - switch typeIndex { - case ATNStateInvalidType: - return nil - - case ATNStateBasic: - s = NewBasicState() - - case ATNStateRuleStart: - s = NewRuleStartState() - - case ATNStateBlockStart: - s = NewBasicBlockStartState() - - case ATNStatePlusBlockStart: - s = NewPlusBlockStartState() - - case ATNStateStarBlockStart: - s = NewStarBlockStartState() - - case ATNStateTokenStart: - s = NewTokensStartState() - - case ATNStateRuleStop: - s = NewRuleStopState() - - case ATNStateBlockEnd: - s = NewBlockEndState() - - case ATNStateStarLoopBack: - s = NewStarLoopbackState() - - case ATNStateStarLoopEntry: - s = NewStarLoopEntryState() - - case ATNStatePlusLoopBack: - s = NewPlusLoopbackState() - - case ATNStateLoopEnd: - s = NewLoopEndState() - - default: - panic(fmt.Sprintf("state type %d is invalid", typeIndex)) - } - - s.SetRuleIndex(ruleIndex) - - return s -} - -func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { - switch typeIndex { - case LexerActionTypeChannel: - return NewLexerChannelAction(data1) - - case LexerActionTypeCustom: - return NewLexerCustomAction(data1, data2) - - case LexerActionTypeMode: - return NewLexerModeAction(data1) - - case LexerActionTypeMore: - return LexerMoreActionINSTANCE - - case LexerActionTypePopMode: - 
return LexerPopModeActionINSTANCE - - case LexerActionTypePushMode: - return NewLexerPushModeAction(data1) - - case LexerActionTypeSkip: - return LexerSkipActionINSTANCE - - case LexerActionTypeType: - return NewLexerTypeAction(data1) - - default: - panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go deleted file mode 100644 index d5454d6d5d..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) - -type IATNSimulator interface { - SharedContextCache() *PredictionContextCache - ATN() *ATN - DecisionToDFA() []*DFA -} - -type BaseATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache - decisionToDFA []*DFA -} - -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { - if b.sharedContextCache == nil { - return context - } - - visited := make(map[PredictionContext]PredictionContext) - - return getCachedBasePredictionContext(context, b.sharedContextCache, visited) -} - -func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { - return b.sharedContextCache -} - -func (b *BaseATNSimulator) ATN() *ATN { - return b.atn -} - -func (b *BaseATNSimulator) DecisionToDFA() []*DFA { - return b.decisionToDFA -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go deleted file mode 100644 index 563d5db38d..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -// Constants for serialization. -const ( - ATNStateInvalidType = 0 - ATNStateBasic = 1 - ATNStateRuleStart = 2 - ATNStateBlockStart = 3 - ATNStatePlusBlockStart = 4 - ATNStateStarBlockStart = 5 - ATNStateTokenStart = 6 - ATNStateRuleStop = 7 - ATNStateBlockEnd = 8 - ATNStateStarLoopBack = 9 - ATNStateStarLoopEntry = 10 - ATNStatePlusLoopBack = 11 - ATNStateLoopEnd = 12 - - ATNStateInvalidStateNumber = -1 -) - -var ATNStateInitialNumTransitions = 4 - -type ATNState interface { - GetEpsilonOnlyTransitions() bool - - GetRuleIndex() int - SetRuleIndex(int) - - GetNextTokenWithinRule() *IntervalSet - SetNextTokenWithinRule(*IntervalSet) - - GetATN() *ATN - SetATN(*ATN) - - GetStateType() int - - GetStateNumber() int - SetStateNumber(int) - - GetTransitions() []Transition - SetTransitions([]Transition) - AddTransition(Transition, int) - - String() string - hash() int -} - -type BaseATNState struct { - // NextTokenWithinRule caches lookahead during parsing. Not used during construction. - NextTokenWithinRule *IntervalSet - - // atn is the current ATN. 
- atn *ATN - - epsilonOnlyTransitions bool - - // ruleIndex tracks the Rule index because there are no Rule objects at runtime. - ruleIndex int - - stateNumber int - - stateType int - - // Track the transitions emanating from this ATN state. - transitions []Transition -} - -func NewBaseATNState() *BaseATNState { - return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -} - -func (as *BaseATNState) GetRuleIndex() int { - return as.ruleIndex -} - -func (as *BaseATNState) SetRuleIndex(v int) { - as.ruleIndex = v -} -func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { - return as.epsilonOnlyTransitions -} - -func (as *BaseATNState) GetATN() *ATN { - return as.atn -} - -func (as *BaseATNState) SetATN(atn *ATN) { - as.atn = atn -} - -func (as *BaseATNState) GetTransitions() []Transition { - return as.transitions -} - -func (as *BaseATNState) SetTransitions(t []Transition) { - as.transitions = t -} - -func (as *BaseATNState) GetStateType() int { - return as.stateType -} - -func (as *BaseATNState) GetStateNumber() int { - return as.stateNumber -} - -func (as *BaseATNState) SetStateNumber(stateNumber int) { - as.stateNumber = stateNumber -} - -func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { - return as.NextTokenWithinRule -} - -func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { - as.NextTokenWithinRule = v -} - -func (as *BaseATNState) hash() int { - return as.stateNumber -} - -func (as *BaseATNState) String() string { - return strconv.Itoa(as.stateNumber) -} - -func (as *BaseATNState) equals(other interface{}) bool { - if ot, ok := other.(ATNState); ok { - return as.stateNumber == ot.GetStateNumber() - } - - return false -} - -func (as *BaseATNState) isNonGreedyExitState() bool { - return false -} - -func (as *BaseATNState) AddTransition(trans Transition, index int) { - if len(as.transitions) == 0 { - as.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { - as.epsilonOnlyTransitions = false - } - - if index == -1 { - as.transitions = append(as.transitions, trans) - } else { - as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) - // TODO: as.transitions.splice(index, 1, trans) - } -} - -type BasicState struct { - *BaseATNState -} - -func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} -} - -type DecisionState interface { - ATNState - - getDecision() int - setDecision(int) - - getNonGreedy() bool - setNonGreedy(bool) -} - -type BaseDecisionState struct { - *BaseATNState - decision int - nonGreedy bool -} - -func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} -} - -func (s *BaseDecisionState) getDecision() int { - return s.decision -} - -func (s *BaseDecisionState) setDecision(b int) { - s.decision = b -} - -func (s *BaseDecisionState) getNonGreedy() bool { - return s.nonGreedy -} - -func (s *BaseDecisionState) setNonGreedy(b bool) { - s.nonGreedy = b -} - -type BlockStartState interface { - DecisionState - - getEndState() *BlockEndState - setEndState(*BlockEndState) -} - -// BaseBlockStartState is the start of a regular (...) block. 
-type BaseBlockStartState struct { - *BaseDecisionState - endState *BlockEndState -} - -func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} -} - -func (s *BaseBlockStartState) getEndState() *BlockEndState { - return s.endState -} - -func (s *BaseBlockStartState) setEndState(b *BlockEndState) { - s.endState = b -} - -type BasicBlockStartState struct { - *BaseBlockStartState -} - -func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} -} - -// BlockEndState is a terminal node of a simple (a|b|c) block. -type BlockEndState struct { - *BaseATNState - startState ATNState -} - -func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} -} - -// RuleStopState is the last node in the ATN for a rule, unless that rule is the -// start symbol. In that case, there is one transition to EOF. Later, we might -// encode references to all calls to this rule to compute FOLLOW sets for error -// handling. -type RuleStopState struct { - *BaseATNState -} - -func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} -} - -type RuleStartState struct { - *BaseATNState - stopState ATNState - isPrecedenceRule bool -} - -func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} -} - -// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two -// transitions: one to the loop back to start of the block, and one to exit. -type PlusLoopbackState struct { - *BaseDecisionState -} - -func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} -} - -// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a -// decision state; we don't use it for code generation. Somebody might need it, -// it is included for completeness. In reality, PlusLoopbackState is the real -// decision-making node for A+. -type PlusBlockStartState struct { - *BaseBlockStartState - loopBackState ATNState -} - -func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} -} - -// StarBlockStartState is the block that begins a closure loop. -type StarBlockStartState struct { - *BaseBlockStartState -} - -func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} -} - -type StarLoopbackState struct { - *BaseATNState -} - -func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} -} - -type StarLoopEntryState struct { - *BaseDecisionState - loopBackState ATNState - precedenceRuleDecision bool -} - -func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} -} - -// LoopEndState marks the end of a * or + loop. 
-type LoopEndState struct { - *BaseATNState - loopBackState ATNState -} - -func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} -} - -// TokensStartState is the Tokens rule start state linking to each lexer rule start state. -type TokensStartState struct { - *BaseDecisionState -} - -func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go deleted file mode 100644 index a7b48976b3..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// Represent the type of recognizer an ATN applies to. -const ( - ATNTypeLexer = 0 - ATNTypeParser = 1 -) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go deleted file mode 100644 index 70c1207f7f..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type CharStream interface { - IntStream - GetText(int, int) string - GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go deleted file mode 100644 index 330ff8f31f..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// TokenFactory creates CommonToken objects. -type TokenFactory interface { - Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token -} - -// CommonTokenFactory is the default TokenFactory implementation. -type CommonTokenFactory struct { - // copyText indicates whether CommonToken.setText should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings of - // text from the input after the lexer creates a token (e.g. the - // implementation of CharStream.GetText in UnbufferedCharStream panics an - // UnsupportedOperationException). Explicitly setting the token text allows - // Token.GetText to be called at any time regardless of the input stream - // implementation. - // - // The default value is false to avoid the performance and memory overhead of - // copying text for every token unless explicitly requested. - copyText bool -} - -func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { - return &CommonTokenFactory{copyText: copyText} -} - -// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. 
It does not -// explicitly copy token text when constructing tokens. -var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) - -func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { - t := NewCommonToken(source, ttype, channel, start, stop) - - t.line = line - t.column = column - - if text != "" { - t.SetText(text) - } else if c.copyText && source.charStream != nil { - t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) - } - - return t -} - -func (c *CommonTokenFactory) createThin(ttype int, text string) Token { - t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) - t.SetText(text) - - return t -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go deleted file mode 100644 index c90e9b8904..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// CommonTokenStream is an implementation of TokenStream that loads tokens from -// a TokenSource on-demand and places the tokens in a buffer to provide access -// to any previous token by index. This token stream ignores the value of -// Token.getChannel. If your parser requires the token stream filter tokens to -// only those on a particular channel, such as Token.DEFAULT_CHANNEL or -// Token.HIDDEN_CHANNEL, use a filtering token stream such a CommonTokenStream. -type CommonTokenStream struct { - channel int - - // fetchedEOF indicates whether the Token.EOF token has been fetched from - // tokenSource and added to tokens. This field improves performance for the - // following cases: - // - // consume: The lookahead check in consume to preven consuming the EOF symbol is - // optimized by checking the values of fetchedEOF and p instead of calling LA. - // - // fetch: The check to prevent adding multiple EOF symbols into tokens is - // trivial with bt field. - fetchedEOF bool - - // index indexs into tokens of the current token (next token to consume). - // tokens[p] should be LT(1). It is set to -1 when the stream is first - // constructed or when SetTokenSource is called, indicating that the first token - // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. - index int - - // tokenSource is the TokenSource from which tokens for the bt stream are - // fetched. - tokenSource TokenSource - - // tokens is all tokens fetched from the token source. The list is considered a - // complete view of the input once fetchedEOF is set to true. 
- tokens []Token -} - -func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { - return &CommonTokenStream{ - channel: channel, - index: -1, - tokenSource: lexer, - tokens: make([]Token, 0), - } -} - -func (c *CommonTokenStream) GetAllTokens() []Token { - return c.tokens -} - -func (c *CommonTokenStream) Mark() int { - return 0 -} - -func (c *CommonTokenStream) Release(marker int) {} - -func (c *CommonTokenStream) reset() { - c.Seek(0) -} - -func (c *CommonTokenStream) Seek(index int) { - c.lazyInit() - c.index = c.adjustSeekIndex(index) -} - -func (c *CommonTokenStream) Get(index int) Token { - c.lazyInit() - - return c.tokens[index] -} - -func (c *CommonTokenStream) Consume() { - SkipEOFCheck := false - - if c.index >= 0 { - if c.fetchedEOF { - // The last token in tokens is EOF. Skip the check if p indexes any fetched. - // token except the last. - SkipEOFCheck = c.index < len(c.tokens)-1 - } else { - // No EOF token in tokens. Skip the check if p indexes a fetched token. - SkipEOFCheck = c.index < len(c.tokens) - } - } else { - // Not yet initialized - SkipEOFCheck = false - } - - if !SkipEOFCheck && c.LA(1) == TokenEOF { - panic("cannot consume EOF") - } - - if c.Sync(c.index + 1) { - c.index = c.adjustSeekIndex(c.index + 1) - } -} - -// Sync makes sure index i in tokens has a token and returns true if a token is -// located at index i and otherwise false. -func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? - - if n > 0 { - fetched := c.fetch(n) - return fetched >= n - } - - return true -} - -// fetch adds n elements to buffer and returns the actual number of elements -// added to the buffer. -func (c *CommonTokenStream) fetch(n int) int { - if c.fetchedEOF { - return 0 - } - - for i := 0; i < n; i++ { - t := c.tokenSource.NextToken() - - t.SetTokenIndex(len(c.tokens)) - c.tokens = append(c.tokens, t) - - if t.GetTokenType() == TokenEOF { - c.fetchedEOF = true - - return i + 1 - } - } - - return n -} - -// GetTokens gets all tokens from start to stop inclusive. -func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { - if start < 0 || stop < 0 { - return nil - } - - c.lazyInit() - - subset := make([]Token, 0) - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - for i := start; i < stop; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - if types == nil || types.contains(t.GetTokenType()) { - subset = append(subset, t) - } - } - - return subset -} - -func (c *CommonTokenStream) LA(i int) int { - return c.LT(i).GetTokenType() -} - -func (c *CommonTokenStream) lazyInit() { - if c.index == -1 { - c.setup() - } -} - -func (c *CommonTokenStream) setup() { - c.Sync(0) - c.index = c.adjustSeekIndex(0) -} - -func (c *CommonTokenStream) GetTokenSource() TokenSource { - return c.tokenSource -} - -// SetTokenSource resets the c token stream by setting its token source. -func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { - c.tokenSource = tokenSource - c.tokens = make([]Token, 0) - c.index = -1 -} - -// NextTokenOnChannel returns the index of the next token on channel given a -// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. 
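As a quick orientation, the sketch below shows how this buffered stream is normally driven from application code; it is illustrative only (not part of the vendored file), assumes an ANTLR-generated lexer (any antlr.Lexer works), and uses only the public methods declared in this file plus fmt.

package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// DumpTokens buffers every token produced by the lexer and prints it.
// Fill keeps fetching from the TokenSource until the EOF token has been
// buffered, so GetAllTokens afterwards is a complete view of the input.
func DumpTokens(lexer antlr.Lexer) {
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	stream.Fill()
	for _, tok := range stream.GetAllTokens() {
		fmt.Printf("line %d:%d %q\n", tok.GetLine(), tok.GetColumn(), tok.GetText())
	}
}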
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { - c.Sync(i) - - if i >= len(c.tokens) { - return -1 - } - - token := c.tokens[i] - - for token.GetChannel() != c.channel { - if token.GetTokenType() == TokenEOF { - return -1 - } - - i++ - c.Sync(i) - token = c.tokens[i] - } - - return i -} - -// previousTokenOnChannel returns the index of the previous token on channel -// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if -// there are no tokens on channel between i and 0. -func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { - for i >= 0 && c.tokens[i].GetChannel() != channel { - i-- - } - - return i -} - -// GetHiddenTokensToRight collects all tokens on a specified channel to the -// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL -// or EOF. If channel is -1, it finds any non-default channel token. -func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) - } - - nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) - from := tokenIndex + 1 - - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token - var to int - - if nextOnChannel == -1 { - to = len(c.tokens) - 1 - } else { - to = nextOnChannel - } - - return c.filterForChannel(from, to, channel) -} - -// GetHiddenTokensToLeft collects all tokens on channel to the left of the -// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -// -1, it finds any non default channel token. -func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) - } - - prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) - - if prevOnChannel == tokenIndex-1 { - return nil - } - - // If there are none on channel to the left and prevOnChannel == -1 then from = 0 - from := prevOnChannel + 1 - to := tokenIndex - 1 - - return c.filterForChannel(from, to, channel) -} - -func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { - hidden := make([]Token, 0) - - for i := left; i < right+1; i++ { - t := c.tokens[i] - - if channel == -1 { - if t.GetChannel() != LexerDefaultTokenChannel { - hidden = append(hidden, t) - } - } else if t.GetChannel() == channel { - hidden = append(hidden, t) - } - } - - if len(hidden) == 0 { - return nil - } - - return hidden -} - -func (c *CommonTokenStream) GetSourceName() string { - return c.tokenSource.GetSourceName() -} - -func (c *CommonTokenStream) Size() int { - return len(c.tokens) -} - -func (c *CommonTokenStream) Index() int { - return c.index -} - -func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) -} - -func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { - if start == nil || end == nil { - return "" - } - - return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) -} - -func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { - return c.GetTextFromInterval(interval.GetSourceInterval()) -} - -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { - c.lazyInit() - c.Fill() - - if interval == nil { - interval = NewInterval(0, len(c.tokens)-1) - } - - start := interval.Start - stop := interval.Stop - - if start < 0 || stop < 0 { - return "" - } - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - s := "" - - for i := start; i < stop+1; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - s += t.GetText() - } - - return s -} - -// Fill gets all tokens from the lexer until EOF. -func (c *CommonTokenStream) Fill() { - c.lazyInit() - - for c.fetch(1000) == 1000 { - continue - } -} - -func (c *CommonTokenStream) adjustSeekIndex(i int) int { - return c.NextTokenOnChannel(i, c.channel) -} - -func (c *CommonTokenStream) LB(k int) Token { - if k == 0 || c.index-k < 0 { - return nil - } - - i := c.index - n := 1 - - // Find k good tokens looking backward - for n <= k { - // Skip off-channel tokens - i = c.previousTokenOnChannel(i-1, c.channel) - n++ - } - - if i < 0 { - return nil - } - - return c.tokens[i] -} - -func (c *CommonTokenStream) LT(k int) Token { - c.lazyInit() - - if k == 0 { - return nil - } - - if k < 0 { - return c.LB(-k) - } - - i := c.index - n := 1 // We know tokens[n] is valid - - // Find k good tokens - for n < k { - // Skip off-channel tokens, but make sure to not look past EOF - if c.Sync(i + 1) { - i = c.NextTokenOnChannel(i+1, c.channel) - } - - n++ - } - - return c.tokens[i] -} - -// getNumberOfOnChannelTokens counts EOF once. 
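The hidden-channel helpers above are what tooling typically uses to recover comments and whitespace that the lexer routed off the default channel. A minimal sketch, assuming the stream was built as in the previous example:

package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// HiddenAfter returns the off-channel tokens (comments, whitespace, and so
// on) sitting immediately to the right of the token at index. A channel of
// -1 matches any non-default channel, per GetHiddenTokensToRight above.
func HiddenAfter(stream *antlr.CommonTokenStream, index int) []antlr.Token {
	stream.Fill() // make sure the region to the right of index is buffered
	return stream.GetHiddenTokensToRight(index, -1)
}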
-func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { - var n int - - c.Fill() - - for i := 0; i < len(c.tokens); i++ { - t := c.tokens[i] - - if t.GetChannel() == c.channel { - n++ - } - - if t.GetTokenType() == TokenEOF { - break - } - } - - return n -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go deleted file mode 100644 index d6079aa203..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "sort" - "sync" -) - -type DFA struct { - // atnStartState is the ATN state in which this was created - atnStartState DecisionState - - decision int - - // states is all the DFA states. Use Map to get the old state back; Set can only - // indicate whether it is there. - states map[int]*DFAState - statesMu sync.RWMutex - - s0 *DFAState - s0Mu sync.RWMutex - - // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. - // True if the DFA is for a precedence decision and false otherwise. - precedenceDfa bool -} - -func NewDFA(atnStartState DecisionState, decision int) *DFA { - return &DFA{ - atnStartState: atnStartState, - decision: decision, - states: make(map[int]*DFAState), - } -} - -// getPrecedenceStartState gets the start state for the current precedence and -// returns the start state corresponding to the specified precedence if a start -// state exists for the specified precedence and nil otherwise. d must be a -// precedence DFA. See also isPrecedenceDfa. -func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { - if !d.precedenceDfa { - panic("only precedence DFAs may contain a precedence start state") - } - - d.s0Mu.RLock() - defer d.s0Mu.RUnlock() - - // s0.edges is never nil for a precedence DFA - if precedence < 0 || precedence >= len(d.s0.edges) { - return nil - } - - return d.s0.edges[precedence] -} - -// setPrecedenceStartState sets the start state for the current precedence. d -// must be a precedence DFA. See also isPrecedenceDfa. -func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { - if !d.precedenceDfa { - panic("only precedence DFAs may contain a precedence start state") - } - - if precedence < 0 { - return - } - - d.s0Mu.Lock() - defer d.s0Mu.Unlock() - - // Synchronization on s0 here is ok. When the DFA is turned into a - // precedence DFA, s0 will be initialized once and not updated again. s0.edges - // is never nil for a precedence DFA. - if precedence >= len(d.s0.edges) { - d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...) - } - - d.s0.edges[precedence] = startState -} - -// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs -// from the current DFA configuration, then d.states is cleared, the initial -// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to -// store the start states for individual precedence values if precedenceDfa is -// true or nil otherwise, and d.precedenceDfa is updated. 
-func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { - if d.precedenceDfa != precedenceDfa { - d.states = make(map[int]*DFAState) - - if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - - precedenceState.edges = make([]*DFAState, 0) - precedenceState.isAcceptState = false - precedenceState.requiresFullContext = false - d.s0 = precedenceState - } else { - d.s0 = nil - } - - d.precedenceDfa = precedenceDfa - } -} - -func (d *DFA) getS0() *DFAState { - d.s0Mu.RLock() - defer d.s0Mu.RUnlock() - return d.s0 -} - -func (d *DFA) setS0(s *DFAState) { - d.s0Mu.Lock() - defer d.s0Mu.Unlock() - d.s0 = s -} - -func (d *DFA) getState(hash int) (*DFAState, bool) { - d.statesMu.RLock() - defer d.statesMu.RUnlock() - s, ok := d.states[hash] - return s, ok -} - -func (d *DFA) setState(hash int, state *DFAState) { - d.statesMu.Lock() - defer d.statesMu.Unlock() - d.states[hash] = state -} - -func (d *DFA) numStates() int { - d.statesMu.RLock() - defer d.statesMu.RUnlock() - return len(d.states) -} - -type dfaStateList []*DFAState - -func (d dfaStateList) Len() int { return len(d) } -func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber } -func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } - -// sortedStates returns the states in d sorted by their state number. -func (d *DFA) sortedStates() []*DFAState { - vs := make([]*DFAState, 0, len(d.states)) - - for _, v := range d.states { - vs = append(vs, v) - } - - sort.Sort(dfaStateList(vs)) - - return vs -} - -func (d *DFA) String(literalNames []string, symbolicNames []string) string { - if d.s0 == nil { - return "" - } - - return NewDFASerializer(d, literalNames, symbolicNames).String() -} - -func (d *DFA) ToLexerString() string { - if d.s0 == nil { - return "" - } - - return NewLexerDFASerializer(d).String() -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go deleted file mode 100644 index 4c0f690229..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// DFASerializer is a DFA walker that knows how to dump them to serialized -// strings. 
-type DFASerializer struct { - dfa *DFA - literalNames []string - symbolicNames []string -} - -func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { - if literalNames == nil { - literalNames = make([]string, 0) - } - - if symbolicNames == nil { - symbolicNames = make([]string, 0) - } - - return &DFASerializer{ - dfa: dfa, - literalNames: literalNames, - symbolicNames: symbolicNames, - } -} - -func (d *DFASerializer) String() string { - if d.dfa.s0 == nil { - return "" - } - - buf := "" - states := d.dfa.sortedStates() - - for _, s := range states { - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += d.GetStateString(s) - buf += "-" - buf += d.getEdgeLabel(j) - buf += "->" - buf += d.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} - -func (d *DFASerializer) getEdgeLabel(i int) string { - if i == 0 { - return "EOF" - } else if d.literalNames != nil && i-1 < len(d.literalNames) { - return d.literalNames[i-1] - } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { - return d.symbolicNames[i-1] - } - - return strconv.Itoa(i - 1) -} - -func (d *DFASerializer) GetStateString(s *DFAState) string { - var a, b string - - if s.isAcceptState { - a = ":" - } - - if s.requiresFullContext { - b = "^" - } - - baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b - - if s.isAcceptState { - if s.predicates != nil { - return baseStateStr + "=>" + fmt.Sprint(s.predicates) - } - - return baseStateStr + "=>" + fmt.Sprint(s.prediction) - } - - return baseStateStr -} - -type LexerDFASerializer struct { - *DFASerializer -} - -func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { - return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} -} - -func (l *LexerDFASerializer) getEdgeLabel(i int) string { - return "'" + string(i) + "'" -} - -func (l *LexerDFASerializer) String() string { - if l.dfa.s0 == nil { - return "" - } - - buf := "" - states := l.dfa.sortedStates() - - for i := 0; i < len(states); i++ { - s := states[i] - - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += l.GetStateString(s) - buf += "-" - buf += l.getEdgeLabel(j) - buf += "->" - buf += l.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go deleted file mode 100644 index 38e918ad91..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// PredPrediction maps a predicate to a predicted alternative. -type PredPrediction struct { - alt int - pred SemanticContext -} - -func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { - return &PredPrediction{alt: alt, pred: pred} -} - -func (p *PredPrediction) String() string { - return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" -} - -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, -// Ullman p. 
117 says: "The DFA uses its state to keep track of all possible -// states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the -// subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state -// as well, however. More importantly, we need to maintain a stack of states, -// tracking the closure operations as they jump from rule to rule, emulating -// rule invocations (method calls). I have to add a stack to simulate the proper -// lookahead sequences for the underlying LL grammar from which the ATN was -// derived. -// -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules -// (if any) followed to arrive at that state. -// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was -// reached via a different set of rule invocations. -type DFAState struct { - stateNumber int - configs ATNConfigSet - - // edges elements point to the target of the symbol. Shift up by 1 so (-1) - // Token.EOF maps to the first element. - edges []*DFAState - - isAcceptState bool - - // prediction is the ttype we match or alt we predict if the state is accept. - // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or - // requiresFullContext. - prediction int - - lexerActionExecutor *LexerActionExecutor - - // requiresFullContext indicates it was created during an SLL prediction that - // discovered a conflict between the configurations in the state. Future - // ParserATNSimulator.execATN invocations immediately jump doing - // full context prediction if true. - requiresFullContext bool - - // predicates is the predicates associated with the ATN configurations of the - // DFA state during SLL parsing. When we have predicates, requiresFullContext - // is false, since full context prediction evaluates predicates on-the-fly. If - // d is - // not nil, then prediction is ATN.INVALID_ALT_NUMBER. - // - // We only use these for non-requiresFullContext but conflicting states. That - // means we know from the context (it's $ or we don't dip into outer context) - // that it's an ambiguity not a conflict. - // - // This list is computed by - // ParserATNSimulator.predicateDFAState. - predicates []*PredPrediction -} - -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { - if configs == nil { - configs = NewBaseATNConfigSet(false) - } - - return &DFAState{configs: configs, stateNumber: stateNumber} -} - -// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. -func (d *DFAState) GetAltSet() *Set { - alts := NewSet(nil, nil) - - if d.configs != nil { - for _, c := range d.configs.GetItems() { - alts.add(c.GetAlt()) - } - } - - if alts.length() == 0 { - return nil - } - - return alts -} - -func (d *DFAState) setPrediction(v int) { - d.prediction = v -} - -// equals returns whether d equals other. Two DFAStates are equal if their ATN -// configuration sets are the same. This method is used to see if a state -// already exists. 
-// -// Because the number of alternatives and number of ATN configurations are -// finite, there is a finite number of DFA states that can be processed. This is -// necessary to show that the algorithm terminates. -// -// Cannot test the DFA state numbers here because in -// ParserATNSimulator.addDFAState we need to know if any other state exists that -// has d exact set of ATN configurations. The stateNumber is irrelevant. -func (d *DFAState) equals(other interface{}) bool { - if d == other { - return true - } else if _, ok := other.(*DFAState); !ok { - return false - } - - return d.configs.Equals(other.(*DFAState).configs) -} - -func (d *DFAState) String() string { - var s string - if d.isAcceptState { - if d.predicates != nil { - s = "=>" + fmt.Sprint(d.predicates) - } else { - s = "=>" + fmt.Sprint(d.prediction) - } - } - - return fmt.Sprintf("%d:%s%s", fmt.Sprint(d.configs), s) -} - -func (d *DFAState) hash() int { - h := murmurInit(11) - - c := 1 - if d.isAcceptState { - if d.predicates != nil { - for _, p := range d.predicates { - h = murmurUpdate(h, p.alt) - h = murmurUpdate(h, p.pred.hash()) - c += 2 - } - } else { - h = murmurUpdate(h, d.prediction) - c += 1 - } - } - - h = murmurUpdate(h, d.configs.hash()) - return murmurFinish(h, c) -} \ No newline at end of file diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go deleted file mode 100644 index 1fec43d9dc..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// -// This implementation of {@link ANTLRErrorListener} can be used to identify -// certain potential correctness and performance problems in grammars. "reports" -// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate -// message. -// -//
    -//
-//   - Ambiguities: These are cases where more than one path through the
-//     grammar can Match the input.
-//   - Weak context sensitivity: These are cases where full-context
-//     prediction resolved an SLL conflict to a unique alternative which equaled the
-//     minimum alternative of the SLL conflict.
-//   - Strong (forced) context sensitivity: These are cases where the
-//     full-context prediction resolved an SLL conflict to a unique alternative,
-//     and the minimum alternative of the SLL conflict was found to not be
-//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
-//     this situation occurs.
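A minimal sketch of how this listener is usually attached; it assumes a generated parser that embeds *antlr.BaseParser, which exposes AddErrorListener, and is not part of the vendored file itself.

package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// EnableAmbiguityDiagnostics makes the parser report ambiguities and
// context sensitivity through its normal error listeners.
func EnableAmbiguityDiagnostics(p *antlr.BaseParser) {
	// exactOnly=true limits the reports to exact ambiguities; pass false
	// to report every ambiguity the prediction machinery sees.
	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
}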
- -type DiagnosticErrorListener struct { - *DefaultErrorListener - - exactOnly bool -} - -func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { - - n := new(DiagnosticErrorListener) - - // whether all ambiguities or only exact ambiguities are Reported. - n.exactOnly = exactOnly - return n -} - -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if d.exactOnly && !exact { - return - } - msg := "reportAmbiguity d=" + - d.getDecisionDescription(recognizer, dfa) + - ": ambigAlts=" + - d.getConflictingAlts(ambigAlts, configs).String() + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - - msg := "reportAttemptingFullContext d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - msg := "reportContextSensitivity d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { - decision := dfa.decision - ruleIndex := dfa.atnStartState.GetRuleIndex() - - ruleNames := recognizer.GetRuleNames() - if ruleIndex < 0 || ruleIndex >= len(ruleNames) { - return strconv.Itoa(decision) - } - ruleName := ruleNames[ruleIndex] - if ruleName == "" { - return strconv.Itoa(decision) - } - return strconv.Itoa(decision) + " (" + ruleName + ")" -} - -// -// Computes the set of conflicting or ambiguous alternatives from a -// configuration set, if that information was not already provided by the -// parser. -// -// @param ReportedAlts The set of conflicting or ambiguous alternatives, as -// Reported by the parser. -// @param configs The conflicting or ambiguous configuration set. -// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise -// returns the set of alternatives represented in {@code configs}. -// -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { - if ReportedAlts != nil { - return ReportedAlts - } - result := NewBitSet() - for _, c := range set.GetItems() { - result.add(c.GetAlt()) - } - - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go deleted file mode 100644 index 028e1a9d7f..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "os" - "strconv" -) - -// Provides an empty default implementation of {@link ANTLRErrorListener}. 
The -// default implementation of each method does nothing, but can be overridden as -// necessary. - -type ErrorListener interface { - SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) -} - -type DefaultErrorListener struct { -} - -func NewDefaultErrorListener() *DefaultErrorListener { - return new(DefaultErrorListener) -} - -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { -} - -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { -} - -type ConsoleErrorListener struct { - *DefaultErrorListener -} - -func NewConsoleErrorListener() *ConsoleErrorListener { - return new(ConsoleErrorListener) -} - -// -// Provides a default instance of {@link ConsoleErrorListener}. -// -var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() - -// -// {@inheritDoc} -// -//

-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format.
-//
-//	line line:charPositionInLine msg
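Instead of printing to stderr the way the console listener below does, applications often collect syntax errors for later inspection. A sketch of that pattern, built only on the ErrorListener interface declared above (the wiring comment assumes a generated parser embedding *antlr.BaseParser):

package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// collectingListener records syntax errors instead of writing them to
// stderr. Embedding DefaultErrorListener keeps the other ErrorListener
// methods as no-ops.
type collectingListener struct {
	*antlr.DefaultErrorListener
	errs []string
}

func newCollectingListener() *collectingListener {
	return &collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
}

func (c *collectingListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
	line, column int, msg string, e antlr.RecognitionException) {
	c.errs = append(c.errs, fmt.Sprintf("line %d:%d %s", line, column, msg))
}

// Typical wiring on a generated parser p:
//   p.RemoveErrorListeners()
//   p.AddErrorListener(newCollectingListener())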
-// -func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go deleted file mode 100644 index 977a6e4549..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - inErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -// -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //inErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. 
-	//
-	d.lastErrorIndex = -1
-	d.lastErrorStates = nil
-	return d
-}
-
-// The default implementation simply calls {@link //endErrorCondition} to
-// ensure that the handler is not in error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -// -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// -// {@inheritDoc} -// -//

-// The default implementation simply calls {@link //endErrorCondition}.
-//
-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
-	d.endErrorCondition(recognizer)
-}
-
-// {@inheritDoc}
-//
-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.
-//
-//   - {@link NoViableAltException}: Dispatches the call to
-//     {@link //ReportNoViableAlternative}
-//   - {@link InputMisMatchException}: Dispatches the call to
-//     {@link //ReportInputMisMatch}
-//   - {@link FailedPredicateException}: Dispatches the call to
-//     {@link //ReportFailedPredicate}
-//   - All other types: calls {@link Parser//NotifyErrorListeners} to Report
-//     the exception
-// -func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { - // if we've already Reported an error and have not Matched a token - // yet successfully, don't Report any errors. - if d.inErrorRecoveryMode(recognizer) { - return // don't Report spurious errors - } - d.beginErrorCondition(recognizer) - - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) - // fmt.Println(e.stack) - recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) - case *NoViableAltException: - d.ReportNoViableAlternative(recognizer, t) - case *InputMisMatchException: - d.ReportInputMisMatch(recognizer, t) - case *FailedPredicateException: - d.ReportFailedPredicate(recognizer, t) - } -} - -// {@inheritDoc} -// -//

-// The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.
-// -func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//

-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.:
-//
-//	a    : Sync ( stuff Sync )*
-//	Sync : {consume to what can follow Sync}
-//
-// At the start of a sub rule upon error, {@link //Sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// reSynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
-//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
-//
-// ORIGINS
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatched token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
-//
-//	classfunc : 'class' ID '{' member* '}'
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality cost a little bit of effort because the parser has to
-// compare the token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding this method as a blank { }.
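The "blank override" mentioned above can be expressed by embedding the default strategy; this is only a sketch (the custom type still satisfies ErrorStrategy through the promoted methods, and the install comment assumes a generated parser embedding *antlr.BaseParser).

package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// noSyncStrategy behaves exactly like DefaultErrorStrategy except that the
// per-subrule and per-iteration Sync check is turned off.
type noSyncStrategy struct {
	*antlr.DefaultErrorStrategy
}

func newNoSyncStrategy() *noSyncStrategy {
	return &noSyncStrategy{DefaultErrorStrategy: antlr.NewDefaultErrorStrategy()}
}

// Sync deliberately does nothing, trading some recovery quality for less
// work on every subrule entry and loop iteration. Install it with
// p.SetErrorHandler(newNoSyncStrategy()).
func (s *noSyncStrategy) Sync(recognizer antlr.Parser) {}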

-// -func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.inErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "" - } else { - input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "" - } - msg := "no viable alternative at input " + d.escapeWSAndQuote(input) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { - ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] - msg := "rule " + ruleName + " " + e.message - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This method is called to Report a syntax error which requires the removal -// of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. -// -//

-// This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//

-// This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//

-// The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method panics with an
-// {@link InputMisMatchException}.
-//
-// EXTRA TOKEN (single token deletion)
-//
-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.
-//
-// This recovery strategy is implemented by {@link //singleTokenDeletion}.
-//
-// MISSING TOKEN (single token insertion)
-//
-// If the current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.
-//
-// This recovery strategy is implemented by {@link //singleTokenInsertion}.
-//
-// EXAMPLE
-//
-// For example, input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// the call chain:
-//
-//	stat → expr → atom
-//
-// and it will be trying to Match the {@code ')'} at this point in the
-// derivation:
-//
-//	=> ID '=' '(' INT ')' ('+' atom)* ''
-//	                ^
-// -// The attempt to Match {@code ')'} will fail when it sees {@code ''} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -// -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//

-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If this method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce this behavior.
-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -// -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//

-// If the single-token deletion is successful, this method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning, {@link //ReportMatch} is called to signal a successful
-// Match.
-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -// -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -// -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "" - } else { - ln := recognizer.GetLiteralNames() - if expectedTokenType > 0 && expectedTokenType < len(ln) { - tokenText = "" - } else { - tokenText = "" // TODO matches the JS impl - } - } - current := currentSymbol - lookback := recognizer.GetTokenStream().LT(-1) - if current.GetTokenType() == TokenEOF && lookback != nil { - current = lookback - } - - tf := recognizer.GetTokenFactory() - - return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) -} - -func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { - return recognizer.GetExpectedTokens() -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. 
-// -func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - return d.escapeWSAndQuote(s) -} - -func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" -} - -// Compute the error recovery set for the current rule. During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. -// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// -// b : c '^' INT -// c : ID -// | INT -// -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// reSync to one of those tokens. Note that FOLLOW(c)='^' and if -// we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and panics an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful Match exits errorRecovery mode). -// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode. 
-// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -// -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { - atn := recognizer.GetInterpreter().atn - ctx := recognizer.GetParserRuleContext() - recoverSet := NewIntervalSet() - for ctx != nil && ctx.GetInvokingState() >= 0 { - // compute what follows who invoked us - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.GetParent().(ParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet -} - -// Consume tokens until one Matches the given token set.// -func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { - ttype := recognizer.GetTokenStream().LA(1) - for ttype != TokenEOF && !set.contains(ttype) { - recognizer.Consume() - ttype = recognizer.GetTokenStream().LA(1) - } -} - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//

-// This error strategy is useful in the following scenarios.
-//
-// - Two-stage parsing: This error strategy allows the first -// stage of two-stage parsing to immediately terminate if an error is -// encountered, and immediately fall back to the second stage. In addition to -// avoiding wasted work by attempting to recover from errors here, the empty -// implementation of {@link BailErrorStrategy//Sync} improves the performance of -// the first stage.
-// - Silent validation: When syntax errors are not being -// Reported or logged, and the parse result is simply ignored if errors occur, -// the {@link BailErrorStrategy} avoids wasting work on recovering from errors -// when the result will be ignored either way.
-//
-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
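In practice the two-stage pattern is: run the cheap first pass with the bail strategy, catch the cancellation it panics with, and only then re-parse with full error recovery. A rough Go sketch of that control flow follows; parseWithBail, parseWithRecovery, and the parseCancellation type are hypothetical stand-ins for a generated parser configured with NewBailErrorStrategy and NewDefaultErrorStrategy, not the real runtime calls.

```go
package main

import (
	"errors"
	"fmt"
)

// parseCancellation stands in for the ParseCancellationException panic
// raised by a bail-style error strategy.
type parseCancellation struct{ msg string }

// parseWithBail pretends to be the fast first stage: it panics on the
// first syntax error instead of trying to recover.
func parseWithBail(input string) string {
	if input == "[]" { // pretend "[]" is syntactically invalid
		panic(parseCancellation{msg: "mismatched input ']'"})
	}
	return "tree(" + input + ")"
}

// parseWithRecovery pretends to be the slower second stage with full
// error recovery and reporting.
func parseWithRecovery(input string) string {
	return "recovered-tree(" + input + ")"
}

// parseTwoStage runs the bail stage first and falls back on cancellation.
func parseTwoStage(input string) (tree string, err error) {
	func() {
		defer func() {
			if r := recover(); r != nil {
				if _, ok := r.(parseCancellation); ok {
					err = errors.New("first stage bailed")
					return
				}
				panic(r) // unrelated panic: propagate
			}
		}()
		tree = parseWithBail(input)
	}()
	if err != nil {
		tree = parseWithRecovery(input) // second stage
		err = nil
	}
	return tree, err
}

func main() {
	t, _ := parseTwoStage("[]")
	fmt.Println(t) // recovered-tree([])
}
```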
-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -// -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - context = context.GetParent().(ParserRuleContext) - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -// -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go deleted file mode 100644 index 2ef74926ec..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. 
- t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//

-// If the state number is not known, b method returns -1.
-//
-// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was raised.
-//
-// If the set of expected tokens is not known and could not be computed, -// b method returns {@code nil}.
-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -// -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -// -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go deleted file mode 100644 index 842170c086..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. - -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go deleted file mode 100644 index 5ff270f536..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) - return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go deleted file mode 100644 index 438e0ea6e7..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type IntStream interface { - Consume() - LA(int) int - Mark() int - Release(marker int) - Index() int - Seek(index int) - Size() int - GetSourceName() string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go deleted file mode 100644 index 510d909114..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type Interval struct { - Start int - Stop int -} - -/* stop is not included! 
*/ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i -} - -func (i *Interval) Contains(item int) bool { - return item >= i.Start && item < i.Stop -} - -func (i *Interval) String() string { - if i.Start == i.Stop-1 { - return strconv.Itoa(i.Start) - } - - return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) -} - -func (i *Interval) length() int { - return i.Stop - i.Start -} - -type IntervalSet struct { - intervals []*Interval - readOnly bool -} - -func NewIntervalSet() *IntervalSet { - - i := new(IntervalSet) - - i.intervals = nil - i.readOnly = false - - return i -} - -func (i *IntervalSet) first() int { - if len(i.intervals) == 0 { - return TokenInvalidType - } - - return i.intervals[0].Start -} - -func (i *IntervalSet) addOne(v int) { - i.addInterval(NewInterval(v, v+1)) -} - -func (i *IntervalSet) addRange(l, h int) { - i.addInterval(NewInterval(l, h+1)) -} - -func (i *IntervalSet) addInterval(v *Interval) { - if i.intervals == nil { - i.intervals = make([]*Interval, 0) - i.intervals = append(i.intervals, v) - } else { - // find insert pos - for k, interval := range i.intervals { - // distinct range -> insert - if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) - return - } else if v.Stop == interval.Start { - i.intervals[k].Start = v.Start - return - } else if v.Start <= interval.Stop { - i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) - - // if not applying to end, merge potential overlaps - if k < len(i.intervals)-1 { - l := i.intervals[k] - r := i.intervals[k+1] - // if r contained in l - if l.Stop >= r.Stop { - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) - } else if l.Stop >= r.Start { // partial overlap - i.intervals[k] = NewInterval(l.Start, r.Stop) - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) - } - } - return - } - } - // greater than any exiting - i.intervals = append(i.intervals, v) - } -} - -func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { - if other.intervals != nil { - for k := 0; k < len(other.intervals); k++ { - i2 := other.intervals[k] - i.addInterval(NewInterval(i2.Start, i2.Stop)) - } - } - return i -} - -func (i *IntervalSet) complement(start int, stop int) *IntervalSet { - result := NewIntervalSet() - result.addInterval(NewInterval(start, stop+1)) - for j := 0; j < len(i.intervals); j++ { - result.removeRange(i.intervals[j]) - } - return result -} - -func (i *IntervalSet) contains(item int) bool { - if i.intervals == nil { - return false - } - for k := 0; k < len(i.intervals); k++ { - if i.intervals[k].Contains(item) { - return true - } - } - return false -} - -func (i *IntervalSet) length() int { - len := 0 - - for _, v := range i.intervals { - len += v.length() - } - - return len -} - -func (i *IntervalSet) removeRange(v *Interval) { - if v.Start == v.Stop-1 { - i.removeOne(v.Start) - } else if i.intervals != nil { - k := 0 - for n := 0; n < len(i.intervals); n++ { - ni := i.intervals[k] - // intervals are ordered - if v.Stop <= ni.Start { - return - } else if v.Start > ni.Start && v.Stop < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - x := NewInterval(v.Stop, ni.Stop) - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
- return - } else if v.Start <= ni.Start && v.Stop >= ni.Stop { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - k = k - 1 // need another pass - } else if v.Start < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - } else if v.Stop < ni.Stop { - i.intervals[k] = NewInterval(v.Stop, ni.Stop) - } - k++ - } - } -} - -func (i *IntervalSet) removeOne(v int) { - if i.intervals != nil { - for k := 0; k < len(i.intervals); k++ { - ki := i.intervals[k] - // intervals i ordered - if v < ki.Start { - return - } else if v == ki.Start && v == ki.Stop-1 { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - return - } else if v == ki.Start { - i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) - return - } else if v == ki.Stop-1 { - i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) - return - } else if v < ki.Stop-1 { - x := NewInterval(ki.Start, v) - ki.Start = v + 1 - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) - return - } - } - } -} - -func (i *IntervalSet) String() string { - return i.StringVerbose(nil, nil, false) -} - -func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string { - - if i.intervals == nil { - return "{}" - } else if literalNames != nil || symbolicNames != nil { - return i.toTokenString(literalNames, symbolicNames) - } else if elemsAreChar { - return i.toCharString() - } - - return i.toIndexString() -} - -func (i *IntervalSet) toCharString() string { - names := make([]string, len(i.intervals)) - - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "") - } else { - names = append(names, ("'" + string(v.Start) + "'")) - } - } else { - names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'") - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toIndexString() string { - - names := make([]string, 0) - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "") - } else { - names = append(names, strconv.Itoa(v.Start)) - } - } else { - names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { - names := make([]string, 0) - for _, v := range i.intervals { - for j := v.Start; j < v.Stop; j++ { - names = append(names, i.elementName(literalNames, symbolicNames, j)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { - if a == TokenEOF { - return "" - } else if a == TokenEpsilon { - return "" - } else { - if a < len(literalNames) && literalNames[a] != "" { - return literalNames[a] - } - - return symbolicNames[a] - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go deleted file mode 100644 index b04f04572f..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A lexer is recognizer that draws input symbols from a character stream. -// lexer grammars result in a subclass of this object. A Lexer object -// uses simplified Match() and error recovery mechanisms in the interest -// of speed. -/// - -type Lexer interface { - TokenSource - Recognizer - - Emit() Token - - SetChannel(int) - PushMode(int) - PopMode() int - SetType(int) - SetMode(int) -} - -type BaseLexer struct { - *BaseRecognizer - - Interpreter ILexerATNSimulator - TokenStartCharIndex int - TokenStartLine int - TokenStartColumn int - ActionType int - Virt Lexer // The most derived lexer implementation. Allows virtual method calls. - - input CharStream - factory TokenFactory - tokenFactorySourcePair *TokenSourceCharStreamPair - token Token - hitEOF bool - channel int - thetype int - modeStack IntStack - mode int - text string -} - -func NewBaseLexer(input CharStream) *BaseLexer { - - lexer := new(BaseLexer) - - lexer.BaseRecognizer = NewBaseRecognizer() - - lexer.input = input - lexer.factory = CommonTokenFactoryDEFAULT - lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} - - lexer.Virt = lexer - - lexer.Interpreter = nil // child classes must populate it - - // The goal of all lexer rules/methods is to create a token object. - // l is an instance variable as multiple rules may collaborate to - // create a single token. NextToken will return l object after - // Matching lexer rule(s). If you subclass to allow multiple token - // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not - // emit another token. - lexer.token = nil - - // What character index in the stream did the current token start at? - // Needed, for example, to get the text for current token. Set at - // the start of NextToken. - lexer.TokenStartCharIndex = -1 - - // The line on which the first character of the token resides/// - lexer.TokenStartLine = -1 - - // The character position of first character within the line/// - lexer.TokenStartColumn = -1 - - // Once we see EOF on char stream, next token will be EOF. - // If you have DONE : EOF then you see DONE EOF. - lexer.hitEOF = false - - // The channel number for the current token/// - lexer.channel = TokenDefaultChannel - - // The token type for the current token/// - lexer.thetype = TokenInvalidType - - lexer.modeStack = make([]int, 0) - lexer.mode = LexerDefaultMode - - // You can set the text for the current token to override what is in - // the input char buffer. Use setText() or can set l instance var. 
- // / - lexer.text = "" - - return lexer -} - -const ( - LexerDefaultMode = 0 - LexerMore = -2 - LexerSkip = -3 -) - -const ( - LexerDefaultTokenChannel = TokenDefaultChannel - LexerHidden = TokenHiddenChannel - LexerMinCharValue = 0x0000 - LexerMaxCharValue = 0x10FFFF -) - -func (b *BaseLexer) reset() { - // wack Lexer state variables - if b.input != nil { - b.input.Seek(0) // rewind the input - } - b.token = nil - b.thetype = TokenInvalidType - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = -1 - b.TokenStartColumn = -1 - b.TokenStartLine = -1 - b.text = "" - - b.hitEOF = false - b.mode = LexerDefaultMode - b.modeStack = make([]int, 0) - - b.Interpreter.reset() -} - -func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { - return b.Interpreter -} - -func (b *BaseLexer) GetInputStream() CharStream { - return b.input -} - -func (b *BaseLexer) GetSourceName() string { - return b.GrammarFileName -} - -func (b *BaseLexer) SetChannel(v int) { - b.channel = v -} - -func (b *BaseLexer) GetTokenFactory() TokenFactory { - return b.factory -} - -func (b *BaseLexer) setTokenFactory(f TokenFactory) { - b.factory = f -} - -func (b *BaseLexer) safeMatch() (ret int) { - defer func() { - if e := recover(); e != nil { - if re, ok := e.(RecognitionException); ok { - b.notifyListeners(re) // Report error - b.Recover(re) - ret = LexerSkip // default - } - } - }() - - return b.Interpreter.Match(b.input, b.mode) -} - -// Return a token from l source i.e., Match a token on the char stream. -func (b *BaseLexer) NextToken() Token { - if b.input == nil { - panic("NextToken requires a non-nil input stream.") - } - - tokenStartMarker := b.input.Mark() - - // previously in finally block - defer func() { - // make sure we release marker after Match or - // unbuffered char stream will keep buffering - b.input.Release(tokenStartMarker) - }() - - for { - if b.hitEOF { - b.EmitEOF() - return b.token - } - b.token = nil - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = b.input.Index() - b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() - b.TokenStartLine = b.Interpreter.GetLine() - b.text = "" - continueOuter := false - for { - b.thetype = TokenInvalidType - ttype := LexerSkip - - ttype = b.safeMatch() - - if b.input.LA(1) == TokenEOF { - b.hitEOF = true - } - if b.thetype == TokenInvalidType { - b.thetype = ttype - } - if b.thetype == LexerSkip { - continueOuter = true - break - } - if b.thetype != LexerMore { - break - } - } - - if continueOuter { - continue - } - if b.token == nil { - b.Virt.Emit() - } - return b.token - } - - return nil -} - -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that -// if token==nil at end of any token rule, it creates one for you -// and emits it. 
-// / -func (b *BaseLexer) Skip() { - b.thetype = LexerSkip -} - -func (b *BaseLexer) More() { - b.thetype = LexerMore -} - -func (b *BaseLexer) SetMode(m int) { - b.mode = m -} - -func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { - fmt.Println("pushMode " + strconv.Itoa(m)) - } - b.modeStack.Push(b.mode) - b.mode = m -} - -func (b *BaseLexer) PopMode() int { - if len(b.modeStack) == 0 { - panic("Empty Stack") - } - if LexerATNSimulatorDebug { - fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) - } - i, _ := b.modeStack.Pop() - b.mode = i - return b.mode -} - -func (b *BaseLexer) inputStream() CharStream { - return b.input -} - -// SetInputStream resets the lexer input stream and associated lexer state. -func (b *BaseLexer) SetInputStream(input CharStream) { - b.input = nil - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() - b.input = input - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} -} - -func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { - return b.tokenFactorySourcePair -} - -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / -func (b *BaseLexer) EmitToken(token Token) { - b.token = token -} - -// The standard method called to automatically emit a token at the -// outermost lexical rule. The token object should point into the -// char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. -// / -func (b *BaseLexer) Emit() Token { - t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) - b.EmitToken(t) - return t -} - -func (b *BaseLexer) EmitEOF() Token { - cpos := b.GetCharPositionInLine() - lpos := b.GetLine() - eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) - b.EmitToken(eof) - return eof -} - -func (b *BaseLexer) GetCharPositionInLine() int { - return b.Interpreter.GetCharPositionInLine() -} - -func (b *BaseLexer) GetLine() int { - return b.Interpreter.GetLine() -} - -func (b *BaseLexer) GetType() int { - return b.thetype -} - -func (b *BaseLexer) SetType(t int) { - b.thetype = t -} - -// What is the index of the current character of lookahead?/// -func (b *BaseLexer) GetCharIndex() int { - return b.input.Index() -} - -// Return the text Matched so far for the current token or any text override. -//Set the complete text of l token it wipes any previous changes to the text. -func (b *BaseLexer) GetText() string { - if b.text != "" { - return b.text - } - - return b.Interpreter.GetText(b.input) -} - -func (b *BaseLexer) SetText(text string) { - b.text = text -} - -func (b *BaseLexer) GetATN() *ATN { - return b.Interpreter.ATN() -} - -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. 
-// / -func (b *BaseLexer) GetAllTokens() []Token { - vl := b.Virt - tokens := make([]Token, 0) - t := vl.NextToken() - for t.GetTokenType() != TokenEOF { - tokens = append(tokens, t) - t = vl.NextToken() - } - return tokens -} - -func (b *BaseLexer) notifyListeners(e RecognitionException) { - start := b.TokenStartCharIndex - stop := b.input.Index() - text := b.input.GetTextFromInterval(NewInterval(start, stop)) - msg := "token recognition error at: '" + text + "'" - listener := b.GetErrorListenerDispatch() - listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) -} - -func (b *BaseLexer) getErrorDisplayForChar(c rune) string { - if c == TokenEOF { - return "" - } else if c == '\n' { - return "\\n" - } else if c == '\t' { - return "\\t" - } else if c == '\r' { - return "\\r" - } else { - return string(c) - } -} - -func (b *BaseLexer) getCharErrorDisplay(c rune) string { - return "'" + b.getErrorDisplayForChar(c) + "'" -} - -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope -// it all works out. You can instead use the rule invocation stack -// to do sophisticated error recovery if you are in a fragment rule. -// / -func (b *BaseLexer) Recover(re RecognitionException) { - if b.input.LA(1) != TokenEOF { - if _, ok := re.(*LexerNoViableAltException); ok { - // Skip a char and try again - b.Interpreter.Consume(b.input) - } else { - // TODO: Do we lose character or line position information? - b.input.Consume() - } - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go deleted file mode 100644 index 20df84f943..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. 
-) - -type LexerAction interface { - getActionType() int - getIsPositionDependent() bool - execute(lexer Lexer) - hash() int - equals(other LexerAction) bool -} - -type BaseLexerAction struct { - actionType int - isPositionDependent bool -} - -func NewBaseLexerAction(action int) *BaseLexerAction { - la := new(BaseLexerAction) - - la.actionType = action - la.isPositionDependent = false - - return la -} - -func (b *BaseLexerAction) execute(lexer Lexer) { - panic("Not implemented") -} - -func (b *BaseLexerAction) getActionType() int { - return b.actionType -} - -func (b *BaseLexerAction) getIsPositionDependent() bool { - return b.isPositionDependent -} - -func (b *BaseLexerAction) hash() int { - return b.actionType -} - -func (b *BaseLexerAction) equals(other LexerAction) bool { - return b == other -} - -// -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. -// -//

-// The {@code Skip} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
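A way to picture the singleton pattern described above: a parameterless command is a stateless value behind a small action interface, shared as one package-level instance. The interface and lexer stub below are simplified stand-ins for the runtime's LexerAction and Lexer types, shown only to illustrate the shape of the dispatch.

```go
package main

import "fmt"

// lexer is a minimal stand-in for the runtime's Lexer interface.
type lexer interface {
	Skip()
	More()
}

// action mirrors the shape of a lexer command: it only needs execute.
type action interface {
	execute(l lexer)
}

// skipAction has no parameters, so one shared instance is enough.
type skipAction struct{}

func (skipAction) execute(l lexer) { l.Skip() }

// skipInstance plays the role of LexerSkipActionINSTANCE.
var skipInstance action = skipAction{}

// fakeLexer just records which commands were executed.
type fakeLexer struct{ log []string }

func (f *fakeLexer) Skip() { f.log = append(f.log, "skip") }
func (f *fakeLexer) More() { f.log = append(f.log, "more") }

func main() {
	fl := &fakeLexer{}
	skipInstance.execute(fl)
	fmt.Println(fl.log) // [skip]
}
```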
-type LexerSkipAction struct { - *BaseLexerAction -} - -func NewLexerSkipAction() *LexerSkipAction { - la := new(LexerSkipAction) - la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) - return la -} - -// Provides a singleton instance of l parameterless lexer action. -var LexerSkipActionINSTANCE = NewLexerSkipAction() - -func (l *LexerSkipAction) execute(lexer Lexer) { - lexer.Skip() -} - -func (l *LexerSkipAction) String() string { - return "skip" -} - -// Implements the {@code type} lexer action by calling {@link Lexer//setType} -// with the assigned type. -type LexerTypeAction struct { - *BaseLexerAction - - thetype int -} - -func NewLexerTypeAction(thetype int) *LexerTypeAction { - l := new(LexerTypeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) - l.thetype = thetype - return l -} - -func (l *LexerTypeAction) execute(lexer Lexer) { - lexer.SetType(l.thetype) -} - -func (l *LexerTypeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.thetype) - return murmurFinish(h, 2) -} - -func (l *LexerTypeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerTypeAction); !ok { - return false - } else { - return l.thetype == other.(*LexerTypeAction).thetype - } -} - -func (l *LexerTypeAction) String() string { - return "actionType(" + strconv.Itoa(l.thetype) + ")" -} - -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. -type LexerPushModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerPushModeAction(mode int) *LexerPushModeAction { - - l := new(LexerPushModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) - - l.mode = mode - return l -} - -//

-// This action is implemented by calling {@link Lexer//pushMode} with the -// value provided by {@link //getMode}.
-func (l *LexerPushModeAction) execute(lexer Lexer) { - lexer.PushMode(l.mode) -} - -func (l *LexerPushModeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerPushModeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerPushModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerPushModeAction).mode - } -} - -func (l *LexerPushModeAction) String() string { - return "pushMode(" + strconv.Itoa(l.mode) + ")" -} - -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. -// -//

-// The {@code popMode} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerPopModeAction struct { - *BaseLexerAction -} - -func NewLexerPopModeAction() *LexerPopModeAction { - - l := new(LexerPopModeAction) - - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) - - return l -} - -var LexerPopModeActionINSTANCE = NewLexerPopModeAction() - -//

-// This action is implemented by calling {@link Lexer//popMode}.
-func (l *LexerPopModeAction) execute(lexer Lexer) { - lexer.PopMode() -} - -func (l *LexerPopModeAction) String() string { - return "popMode" -} - -// Implements the {@code more} lexer action by calling {@link Lexer//more}. -// -//

-// The {@code more} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
- -type LexerMoreAction struct { - *BaseLexerAction -} - -func NewLexerMoreAction() *LexerMoreAction { - l := new(LexerMoreAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) - - return l -} - -var LexerMoreActionINSTANCE = NewLexerMoreAction() - -//

-// This action is implemented by calling {@link Lexer//More}.
-func (l *LexerMoreAction) execute(lexer Lexer) { - lexer.More() -} - -func (l *LexerMoreAction) String() string { - return "more" -} - -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with -// the assigned mode. -type LexerModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerModeAction(mode int) *LexerModeAction { - l := new(LexerModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) - l.mode = mode - return l -} - -//

-// This action is implemented by calling {@link Lexer//mode} with the -// value provided by {@link //getMode}.
-func (l *LexerModeAction) execute(lexer Lexer) { - lexer.SetMode(l.mode) -} - -func (l *LexerModeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerModeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerModeAction).mode - } -} - -func (l *LexerModeAction) String() string { - return "mode(" + strconv.Itoa(l.mode) + ")" -} - -// Executes a custom lexer action by calling {@link Recognizer//action} with the -// rule and action indexes assigned to the custom action. The implementation of -// a custom action is added to the generated code for the lexer in an override -// of {@link Recognizer//action} when the grammar is compiled. -// -//

-// This class may represent embedded actions created with the {...} -// syntax in ANTLR 4, as well as actions created for lexer commands where the -// command argument could not be evaluated when the grammar was compiled.
- -// Constructs a custom lexer action with the specified rule and action -// indexes. -// -// @param ruleIndex The rule index to use for calls to -// {@link Recognizer//action}. -// @param actionIndex The action index to use for calls to -// {@link Recognizer//action}. - -type LexerCustomAction struct { - *BaseLexerAction - ruleIndex, actionIndex int -} - -func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { - l := new(LexerCustomAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) - l.ruleIndex = ruleIndex - l.actionIndex = actionIndex - l.isPositionDependent = true - return l -} - -//

-// Custom actions are implemented by calling {@link Lexer//action} with the -// appropriate rule and action indexes.
-func (l *LexerCustomAction) execute(lexer Lexer) { - lexer.Action(nil, l.ruleIndex, l.actionIndex) -} - -func (l *LexerCustomAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.ruleIndex) - h = murmurUpdate(h, l.actionIndex) - return murmurFinish(h, 3) -} - -func (l *LexerCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerCustomAction); !ok { - return false - } else { - return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex - } -} - -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. -type LexerChannelAction struct { - *BaseLexerAction - - channel int -} - -func NewLexerChannelAction(channel int) *LexerChannelAction { - l := new(LexerChannelAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) - l.channel = channel - return l -} - -//

-// This action is implemented by calling {@link Lexer//setChannel} with the -// value provided by {@link //getChannel}.
-func (l *LexerChannelAction) execute(lexer Lexer) { - lexer.SetChannel(l.channel) -} - -func (l *LexerChannelAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.channel) - return murmurFinish(h, 2) -} - -func (l *LexerChannelAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerChannelAction); !ok { - return false - } else { - return l.channel == other.(*LexerChannelAction).channel - } -} - -func (l *LexerChannelAction) String() string { - return "channel(" + strconv.Itoa(l.channel) + ")" -} - -// This implementation of {@link LexerAction} is used for tracking input offsets -// for position-dependent actions within a {@link LexerActionExecutor}. -// -//

-// This action is not serialized as part of the ATN, and is only required for -// position-dependent lexer actions which appear at a location other than the -// end of a rule. For more information about DFA optimizations employed for -// lexer actions, see {@link LexerActionExecutor//append} and -// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
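One way to see what the indexed wrapper buys: before a match transition, any position-dependent action that does not yet carry an offset is wrapped with the current offset from the token start, so the action list depends only on relative positions and can be shared across DFA states. The sketch below is a simplified, stand-alone model of that wrapping step under those assumptions, not the runtime's fixOffsetBeforeMatch.

```go
package main

import "fmt"

// act is a simplified lexer action: a name plus whether it must run at a
// specific input position.
type act struct {
	name              string
	positionDependent bool
	offset            int // relative to the token start; -1 means unset
}

// fixOffsetBeforeMatch wraps position-dependent actions that have no
// offset yet, leaving everything else untouched (copy-on-write).
func fixOffsetBeforeMatch(actions []act, offset int) []act {
	out := actions
	copied := false
	for i, a := range actions {
		if a.positionDependent && a.offset < 0 {
			if !copied {
				out = append([]act(nil), actions...)
				copied = true
			}
			out[i].offset = offset
		}
	}
	return out
}

func main() {
	actions := []act{
		{name: "setChannel", positionDependent: false, offset: -1},
		{name: "customAction", positionDependent: true, offset: -1},
	}
	fixed := fixOffsetBeforeMatch(actions, 3)
	fmt.Printf("%+v\n", fixed)
	// the custom action now carries offset 3; the original slice is unchanged
	fmt.Printf("%+v\n", actions)
}
```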
- -// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//

-// Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.
-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. -type LexerIndexedCustomAction struct { - *BaseLexerAction - - offset int - lexerAction LexerAction - isPositionDependent bool -} - -func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { - - l := new(LexerIndexedCustomAction) - l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) - - l.offset = offset - l.lexerAction = lexerAction - l.isPositionDependent = true - - return l -} - -//

-// This method calls {@link //execute} on the result of {@link //getAction} -// using the provided {@code lexer}.
-func (l *LexerIndexedCustomAction) execute(lexer Lexer) { - // assume the input stream position was properly set by the calling code - l.lexerAction.execute(lexer) -} - -func (l *LexerIndexedCustomAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.offset) - h = murmurUpdate(h, l.lexerAction.hash()) - return murmurFinish(h, 3) -} - -func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerIndexedCustomAction); !ok { - return false - } else { - return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go deleted file mode 100644 index 80b949a1a5..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// Represents an executor for a sequence of lexer actions which traversed during -// the Matching operation of a lexer rule (token). -// -//

-// The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.
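The executor described above is essentially an immutable list of actions with a hash computed once at construction, so configurations can be compared cheaply while the DFA is built. A simplified stand-alone model follows; the string-named actions and the FNV hash are stand-ins for the runtime's LexerAction values and murmur hashing.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// executor is a simplified model of an action executor: an immutable list
// of action names plus a hash cached at construction time, so equality
// checks during DFA construction stay cheap.
type executor struct {
	actions    []string
	cachedHash uint32
}

func newExecutor(actions []string) *executor {
	h := fnv.New32a() // stand-in for the runtime's murmur hashing
	for _, a := range actions {
		h.Write([]byte(a))
	}
	return &executor{actions: actions, cachedHash: h.Sum32()}
}

// appendAction returns a new executor; the receiver is never mutated,
// mirroring how executors are combined while configurations are built.
func (e *executor) appendAction(a string) *executor {
	if e == nil {
		return newExecutor([]string{a})
	}
	combined := append(append([]string(nil), e.actions...), a)
	return newExecutor(combined)
}

func main() {
	var e *executor
	e = e.appendAction("more")
	e = e.appendAction("setChannel(2)")
	fmt.Println(e.actions, e.cachedHash)
}
```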
- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//

-// Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule Matches a variable number of -// characters.
-//
-// Prior to traversing a Match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.
-//
-// If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.
-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//

-// This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.
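The save-and-restore contract described above maps naturally onto a deferred seek. The sketch below shows just that idiom with a toy cursor type standing in for a CharStream; it is not the runtime's implementation.

```go
package main

import "fmt"

// cursor is a toy stand-in for a CharStream that only tracks an index.
type cursor struct{ index int }

func (c *cursor) Seek(i int) { c.index = i }
func (c *cursor) Index() int { return c.index }

// runAt seeks to an absolute position, runs the action there, and
// guarantees the original position is restored when it returns.
func runAt(c *cursor, pos int, action func()) {
	stop := c.Index()
	defer c.Seek(stop) // restore even if the action panics
	c.Seek(pos)
	action()
}

func main() {
	c := &cursor{index: 10}
	runAt(c, 4, func() { fmt.Println("acting at", c.Index()) }) // acting at 4
	fmt.Println("restored to", c.Index())                       // restored to 10
}
```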
-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) hash() int { - if l == nil { - return 61 - } - return l.cachedHash -} - -func (l *LexerActionExecutor) equals(other interface{}) bool { - if l == other { - return true - } else if _, ok := other.(*LexerActionExecutor); !ok { - return false - } else { - return l.cachedHash == other.(*LexerActionExecutor).cachedHash && - &l.lexerActions == &other.(*LexerActionExecutor).lexerActions - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go deleted file mode 100644 index 131364f75c..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go +++ /dev/null @@ -1,658 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - - LexerATNSimulatorMinDFAEdge = 0 - LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN - - LexerATNSimulatorMatchCalls = 0 -) - -type ILexerATNSimulator interface { - IATNSimulator - - reset() - Match(input CharStream, mode int) int - GetCharPositionInLine() int - GetLine() int - GetText(input CharStream) string - Consume(input CharStream) -} - -type LexerATNSimulator struct { - *BaseATNSimulator - - recog Lexer - predictionMode int - mergeCache DoubleDict - startIndex int - Line int - CharPositionInLine int - mode int - prevAccept *SimState - MatchCalls int -} - -func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - l.decisionToDFA = decisionToDFA - l.recog = recog - // The current token's starting index into the character stream. - // Shared across DFA to ATN simulation in case the ATN fails and the - // DFA did not have a previous accept state. In l case, we use the - // ATN-generated exception object. 
- l.startIndex = -1 - // line number 1..n within the input/// - l.Line = 1 - // The index of the character relative to the beginning of the line - // 0..n-1/// - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode - // Used during DFA/ATN exec to record the most recent accept configuration - // info - l.prevAccept = NewSimState() - // done - return l -} - -func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { - l.CharPositionInLine = simulator.CharPositionInLine - l.Line = simulator.Line - l.mode = simulator.mode - l.startIndex = simulator.startIndex -} - -func (l *LexerATNSimulator) Match(input CharStream, mode int) int { - l.MatchCalls++ - l.mode = mode - mark := input.Mark() - - defer func() { - input.Release(mark) - }() - - l.startIndex = input.Index() - l.prevAccept.reset() - - dfa := l.decisionToDFA[mode] - - if dfa.s0 == nil { - return l.MatchATN(input) - } - - return l.execATN(input, dfa.s0) -} - -func (l *LexerATNSimulator) reset() { - l.prevAccept.reset() - l.startIndex = -1 - l.Line = 1 - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode -} - -func (l *LexerATNSimulator) MatchATN(input CharStream) int { - startState := l.atn.modeToStartState[l.mode] - - if LexerATNSimulatorDebug { - fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) - } - oldMode := l.mode - s0Closure := l.computeStartState(input, startState) - suppressEdge := s0Closure.hasSemanticContext - s0Closure.hasSemanticContext = false - - next := l.addDFAState(s0Closure) - - if !suppressEdge { - l.decisionToDFA[l.mode].setS0(next) - } - - predict := l.execATN(input, next) - - if LexerATNSimulatorDebug { - fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) - } - return predict -} - -func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - - if LexerATNSimulatorDebug { - fmt.Println("start state closure=" + ds0.configs.String()) - } - if ds0.isAcceptState { - // allow zero-length tokens - l.captureSimState(l.prevAccept, input, ds0) - } - t := input.LA(1) - s := ds0 // s is current/from DFA state - - for { // while more work - if LexerATNSimulatorDebug { - fmt.Println("execATN loop starting closure: " + s.configs.String()) - } - - // As we move src->trg, src->trg, we keep track of the previous trg to - // avoid looking up the DFA state again, which is expensive. - // If the previous target was already part of the DFA, we might - // be able to avoid doing a reach operation upon t. If s!=nil, - // it means that semantic predicates didn't prevent us from - // creating a DFA state. Once we know s!=nil, we check to see if - // the DFA state has an edge already for t. If so, we can just reuse - // it's configuration set there's no point in re-computing it. - // This is kind of like doing DFA simulation within the ATN - // simulation because DFA simulation is really just a way to avoid - // computing reach/closure sets. Technically, once we know that - // we have a previously added DFA state, we could jump over to - // the DFA simulator. But, that would mean popping back and forth - // a lot and making things more complicated algorithmically. - // This optimization makes a lot of sense for loops within DFA. - // A character will take us back to an existing DFA state - // that already has lots of edges out of it. e.g., .* in comments. 
- target := l.getExistingTargetState(s, t) - if target == nil { - target = l.computeTargetState(input, s, t) - // print("Computed:" + str(target)) - } - if target == ATNSimulatorError { - break - } - // If l is a consumable input element, make sure to consume before - // capturing the accept state so the input index, line, and char - // position accurately reflect the state of the interpreter at the - // end of the token. - if t != TokenEOF { - l.Consume(input) - } - if target.isAcceptState { - l.captureSimState(l.prevAccept, input, target) - if t == TokenEOF { - break - } - } - t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state - } - - return l.failOrAccept(l.prevAccept, input, s.configs, t) -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// l method returns {@code nil}. -// -// @param s The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for l edge is not -// already cached -func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { - if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { - return nil - } - - target := s.edges[t-LexerATNSimulatorMinDFAEdge] - if LexerATNSimulatorDebug && target != nil { - fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) - } - return target -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. -func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { - reach := NewOrderedATNConfigSet() - - // if we don't find an existing DFA state - // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) - - if len(reach.configs) == 0 { // we got nowhere on t from s - if !reach.hasSemanticContext { - // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. - l.addDFAEdge(s, t, ATNSimulatorError, nil) - } - // stop when we can't Match any more char - return ATNSimulatorError - } - // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) -} - -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { - if l.prevAccept.dfaState != nil { - lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor - l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) - return prevAccept.dfaState.prediction - } - - // if no accept and EOF is first char, return EOF - if t == TokenEOF && input.Index() == l.startIndex { - return TokenEOF - } - - panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) -} - -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. 
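The note that {@code reach} is a "return parameter" means the callee fills a set owned by the caller instead of allocating and returning one. A small sketch of that convention with simplified stand-in types (not the real ATNConfigSet API):

package main

import "fmt"

// config is a simplified stand-in for an ATN configuration.
type config struct {
	state int
	alt   int
}

// configSet is a simplified stand-in for an ATN configuration set.
type configSet struct {
	items []config
}

func (s *configSet) add(c config) { s.items = append(s.items, c) }

// fillReachable appends every configuration that can move on sym into the
// caller-provided reach set; it returns nothing.
func fillReachable(closure *configSet, reach *configSet, sym rune, canMove func(config, rune) bool) {
	for _, c := range closure.items {
		if canMove(c, sym) {
			reach.add(config{state: c.state + 1, alt: c.alt})
		}
	}
}

func main() {
	closure := &configSet{items: []config{{state: 1, alt: 1}, {state: 2, alt: 2}}}
	reach := &configSet{}
	fillReachable(closure, reach, 'x', func(c config, _ rune) bool { return c.alt == 1 })
	fmt.Println("reachable configs:", reach.items)
}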
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { - // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule - SkipAlt := ATNInvalidAltNumber - - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { - continue - } - - if LexerATNSimulatorDebug { - - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) - } - - for _, trans := range cfg.GetState().GetTransitions() { - target := l.getReachableTarget(trans, t) - if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor - if lexerActionExecutor != nil { - lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) - } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) - if l.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEOFAsEpsilon) { - // any remaining configs for l alt have a lower priority - // than the one that just reached an accept state. - SkipAlt = cfg.GetAlt() - } - } - } - } -} - -func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { - fmt.Printf("ACTION %s\n", lexerActionExecutor) - } - // seek to after last char in token - input.Seek(index) - l.Line = line - l.CharPositionInLine = charPos - if lexerActionExecutor != nil && l.recog != nil { - lexerActionExecutor.execute(l.recog, input, startIndex) - } -} - -func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { - if trans.Matches(t, 0, LexerMaxCharValue) { - return trans.getTarget() - } - - return nil -} - -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { - configs := NewOrderedATNConfigSet() - for i := 0; i < len(p.GetTransitions()); i++ { - target := p.GetTransitions()[i].getTarget() - cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) - l.closure(input, cfg, configs, false, false, false) - } - - return configs -} - -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept -// state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. -// -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. 
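The doc comment above boils down to a depth-first walk that reports success as soon as it reaches an accept state, because lexer alternatives are tried in priority order. A compact sketch of that early-exit DFS over a hypothetical state graph (not the vendored closure implementation):

package main

import "fmt"

// node is a stand-in for an ATN state: accept marks rule stop states,
// next lists epsilon-reachable successors in priority order.
type node struct {
	accept bool
	next   []*node
}

// reachesAccept returns true as soon as a depth-first walk from n finds an
// accept state; lower-priority branches after that point are never explored.
func reachesAccept(n *node, seen map[*node]bool) bool {
	if seen[n] {
		return false
	}
	seen[n] = true
	if n.accept {
		return true
	}
	for _, succ := range n.next {
		if reachesAccept(succ, seen) {
			return true // stop: everything after this has lower priority
		}
	}
	return false
}

func main() {
	stop := &node{accept: true}
	lowPriority := &node{} // explored only if the first branch had no accept state
	start := &node{next: []*node{{next: []*node{stop}}, lowPriority}}
	fmt.Println("reaches accept:", reachesAccept(start, map[*node]bool{}))
}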
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") - } - - _, ok := config.state.(*RuleStopState) - if ok { - - if LexerATNSimulatorDebug { - if l.recog != nil { - fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) - } else { - fmt.Printf("closure at rule stop %s\n", config) - } - } - - if config.context == nil || config.context.hasEmptyPath() { - if config.context == nil || config.context.isEmpty() { - configs.Add(config, nil) - return true - } - - configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) - currentAltReachedAcceptState = true - } - if config.context != nil && !config.context.isEmpty() { - for i := 0; i < config.context.length(); i++ { - if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { - newContext := config.context.GetParent(i) // "pop" return state - returnState := l.atn.states[config.context.getReturnState(i)] - cfg := NewLexerATNConfig2(config, returnState, newContext) - currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - } - return currentAltReachedAcceptState - } - // optimization - if !config.state.GetEpsilonOnlyTransitions() { - if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { - configs.Add(config, nil) - } - } - for j := 0; j < len(config.state.GetTransitions()); j++ { - trans := config.state.GetTransitions()[j] - cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) - if cfg != nil { - currentAltReachedAcceptState = l.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - return currentAltReachedAcceptState -} - -// side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { - - var cfg *LexerATNConfig - - if trans.getSerializationType() == TransitionRULE { - - rt := trans.(*RuleTransition) - newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) - cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) - - } else if trans.getSerializationType() == TransitionPRECEDENCE { - panic("Precedence predicates are not supported in lexers.") - } else if trans.getSerializationType() == TransitionPREDICATE { - // Track traversing semantic predicates. If we traverse, - // we cannot add a DFA state for l "reach" computation - // because the DFA would not test the predicate again in the - // future. Rather than creating collections of semantic predicates - // like v3 and testing them on prediction, v4 will test them on the - // fly all the time using the ATN not the DFA. This is slower but - // semantically it's not used that often. One of the key elements to - // l predicate mechanism is not adding DFA states that see - // predicates immediately afterwards in the ATN. For example, - - // a : ID {p1}? | ID {p2}? - - // should create the start state for rule 'a' (to save start state - // competition), but should not create target of ID state. 
The - // collection of ATN states the following ID references includes - // states reached by traversing predicates. Since l is when we - // test them, we cannot cash the DFA state target of ID. - - pt := trans.(*PredicateTransition) - - if LexerATNSimulatorDebug { - fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) - } - configs.SetHasSemanticContext(true) - if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionACTION { - if config.context == nil || config.context.hasEmptyPath() { - // execute actions anywhere in the start rule for a token. - // - // TODO: if the entry rule is invoked recursively, some - // actions may be executed during the recursive call. The - // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be - // split into two contexts - one with just the empty path - // and another with everything but the empty path. - // Unfortunately, the current algorithm does not allow - // getEpsilonTarget to return two configurations, so - // additional modifications are needed before we can support - // the split operation. - lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) - cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) - } else { - // ignore actions in referenced rules - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionEPSILON { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } else if trans.getSerializationType() == TransitionATOM || - trans.getSerializationType() == TransitionRANGE || - trans.getSerializationType() == TransitionSET { - if treatEOFAsEpsilon { - if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } - } - return cfg -} - -// Evaluate a predicate specified in the lexer. -// -//

If {@code speculative} is {@code true}, this method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.

-// -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / -func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { - // assume true if no recognizer was provided - if l.recog == nil { - return true - } - if !speculative { - return l.recog.Sempred(nil, ruleIndex, predIndex) - } - savedcolumn := l.CharPositionInLine - savedLine := l.Line - index := input.Index() - marker := input.Mark() - - defer func() { - l.CharPositionInLine = savedcolumn - l.Line = savedLine - input.Seek(index) - input.Release(marker) - }() - - l.Consume(input) - return l.recog.Sempred(nil, ruleIndex, predIndex) -} - -func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { - settings.index = input.Index() - settings.line = l.Line - settings.column = l.CharPositionInLine - settings.dfaState = dfaState -} - -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { - if to == nil && cfgs != nil { - // leading to l call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes l edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to reSynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. - // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - - to = l.addDFAState(cfgs) - - if suppressEdge { - return to - } - } - // add the edge - if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { - // Only track edges within the DFA bounds - return to - } - if LexerATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) - } - if from.edges == nil { - // make room for tokens 1..n and -1 masquerading as index 0 - from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1) - } - from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect - - return to -} - -// Add a NewDFA state if there isn't one with l set of -// configurations already. This method also detects the first -// configuration containing an ATN rule stop state. Later, when -// traversing the DFA, we will know which rule to accept. 
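addDFAState below follows a common intern/dedup pattern: build a proposed state, hash its configuration set, and either return the previously registered state with that hash or register the proposal as new. A hedged sketch of that pattern with simplified stand-in types:

package main

import "fmt"

// fakeState is a stand-in for a DFA state keyed by a hash of its config set.
type fakeState struct {
	key    string // stand-in for the config-set hash
	number int
}

type dfaCache struct {
	states map[string]*fakeState
}

// intern returns the already-registered state for key, or registers the
// proposed one and assigns it the next state number.
func (d *dfaCache) intern(key string) *fakeState {
	if existing, ok := d.states[key]; ok {
		return existing
	}
	s := &fakeState{key: key, number: len(d.states)}
	d.states[key] = s
	return s
}

func main() {
	cache := &dfaCache{states: map[string]*fakeState{}}
	a := cache.intern("configs-A")
	b := cache.intern("configs-A") // same configs: the same DFA state comes back
	fmt.Println(a == b, a.number)
}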
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState { - - proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { - - _, ok := cfg.GetState().(*RuleStopState) - - if ok { - firstConfigWithRuleStopState = cfg - break - } - } - if firstConfigWithRuleStopState != nil { - proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor - proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) - } - hash := proposed.hash() - dfa := l.decisionToDFA[l.mode] - existing, ok := dfa.getState(hash) - if ok { - return existing - } - newState := proposed - newState.stateNumber = dfa.numStates() - configs.SetReadOnly(true) - newState.configs = configs - dfa.setState(hash, newState) - return newState -} - -func (l *LexerATNSimulator) getDFA(mode int) *DFA { - return l.decisionToDFA[mode] -} - -// Get the text Matched so far for the current token. -func (l *LexerATNSimulator) GetText(input CharStream) string { - // index is first lookahead char, don't include. - return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) -} - -func (l *LexerATNSimulator) Consume(input CharStream) { - curChar := input.LA(1) - if curChar == int('\n') { - l.Line++ - l.CharPositionInLine = 0 - } else { - l.CharPositionInLine++ - } - input.Consume() -} - -func (l *LexerATNSimulator) GetCharPositionInLine() int { - return l.CharPositionInLine -} - -func (l *LexerATNSimulator) GetLine() int { - return l.Line -} - -func (l *LexerATNSimulator) GetTokenName(tt int) string { - if tt == -1 { - return "EOF" - } - - return "'" + string(tt) + "'" -} - -func resetSimState(sim *SimState) { - sim.index = -1 - sim.line = 0 - sim.column = -1 - sim.dfaState = nil -} - -type SimState struct { - index int - line int - column int - dfaState *DFAState -} - -func NewSimState() *SimState { - s := new(SimState) - resetSimState(s) - return s -} - -func (s *SimState) reset() { - resetSimState(s) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go deleted file mode 100644 index f5afd09b39..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -//* Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -/// -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -//* -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. 
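The contract described above produces one lookahead set per outgoing transition and leaves an entry nil when a semantic predicate is hit before any symbol can be matched. A simplified sketch of that shape (hypothetical symbol sets, not the IntervalSet API):

package main

import "fmt"

// alternative is a stand-in for one outgoing transition of a decision state:
// the symbols it can start with, and whether a predicate guards it.
type alternative struct {
	firstSymbols []rune
	hasPredicate bool
}

// decisionLookahead mirrors the getDecisionLookahead shape: one set per
// alternative, nil when a predicate (or an empty set) makes it unusable.
func decisionLookahead(alts []alternative) [][]rune {
	look := make([][]rune, len(alts))
	for i, alt := range alts {
		if alt.hasPredicate || len(alt.firstSymbols) == 0 {
			look[i] = nil // wipe out lookahead for this alternative
			continue
		}
		look[i] = alt.firstSymbols
	}
	return look
}

func main() {
	alts := []alternative{
		{firstSymbols: []rune{'a', 'b'}},
		{firstSymbols: []rune{'c'}, hasPredicate: true},
	}
	fmt.Println(decisionLookahead(alts)) // [[97 98] []]: the predicate-guarded alternative is nil
}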
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewSet(nil, nil) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code nil} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -/// -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSet} for la argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code NewBitSet()} for la argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HitPred} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code nil}. - -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - - returnState := la.atn.states[ctx.getReturnState(i)] - - removed := calledRuleStack.contains(returnState.GetRuleIndex()) - - defer func() { - if removed { - calledRuleStack.add(returnState.GetRuleIndex()) - } - }() - - calledRuleStack.remove(returnState.GetRuleIndex()) - la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} - -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - - c := NewBaseATNConfig6(s, 0, ctx) - - if lookBusy.contains(c) { - return - } - - lookBusy.add(c) - - if s == stopState { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - } - - _, ok := s.(*RuleStopState) - - if ok { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - - if ctx != BasePredictionContextEMPTY { - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] - la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) - } - return - } - } - - n := len(s.GetTransitions()) - - for i := 0; i < n; i++ { - t := s.GetTransitions()[i] - - if t1, ok := t.(*RuleTransition); ok { - if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { - continue - } - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) - } else if t2, ok := t.(AbstractPredicateTransition); ok { - if seeThruPreds { - la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else { - look.addOne(LL1AnalyzerHitPred) - } - } else if t.getIsEpsilon() { - la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else if _, ok := t.(*WildcardTransition); ok { - look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) - } else { - set := t.getLabel() - if set != nil { - if _, ok := t.(*NotSetTransition); ok { - set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) 
- } - look.addSet(set) - } - } - } -} - -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - - defer func() { - calledRuleStack.remove(t1.getTarget().GetRuleIndex()) - }() - - calledRuleStack.add(t1.getTarget().GetRuleIndex()) - la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go deleted file mode 100644 index fb60258e33..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -type Parser interface { - Recognizer - - GetInterpreter() *ParserATNSimulator - - GetTokenStream() TokenStream - GetTokenFactory() TokenFactory - GetParserRuleContext() ParserRuleContext - SetParserRuleContext(ParserRuleContext) - Consume() Token - GetParseListeners() []ParseTreeListener - - GetErrorHandler() ErrorStrategy - SetErrorHandler(ErrorStrategy) - GetInputStream() IntStream - GetCurrentToken() Token - GetExpectedTokens() *IntervalSet - NotifyErrorListeners(string, Token, RecognitionException) - IsExpectedToken(int) bool - GetPrecedence() int - GetRuleInvocationStack(ParserRuleContext) []string -} - -type BaseParser struct { - *BaseRecognizer - - Interpreter *ParserATNSimulator - BuildParseTrees bool - - input TokenStream - errHandler ErrorStrategy - precedenceStack IntStack - ctx ParserRuleContext - - tracer *TraceListener - parseListeners []ParseTreeListener - _SyntaxErrors int -} - -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// -func NewBaseParser(input TokenStream) *BaseParser { - - p := new(BaseParser) - - p.BaseRecognizer = NewBaseRecognizer() - - // The input stream. - p.input = nil - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - p.errHandler = NewDefaultErrorStrategy() - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. - // p.is always non-nil during the parsing process. - p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. - p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is - // implemented as a parser listener so p.field is not directly used by - // other parser methods. - p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - p.parseListeners = nil - // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. 
- p._SyntaxErrors = 0 - p.SetInputStream(input) - - return p -} - -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -// -var bypassAltsAtnCache = make(map[string]int) - -// reset the parser's state// -func (p *BaseParser) reset() { - if p.input != nil { - p.input.Seek(0) - } - p.errHandler.reset(p) - p.ctx = nil - p._SyntaxErrors = 0 - p.SetTrace(nil) - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - if p.Interpreter != nil { - p.Interpreter.reset() - } -} - -func (p *BaseParser) GetErrorHandler() ErrorStrategy { - return p.errHandler -} - -func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { - p.errHandler = e -} - -// Match current input symbol against {@code ttype}. If the symbol type -// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are -// called to complete the Match process. -// -//

If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//

If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//

To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parse is complete. In addition, calls to
-// certain rule entry methods may be omitted.

-// -//

With the following specific exceptions, calls to listener events are -// deterministic, i.e. for identical input the calls to listener -// methods will be the same.

-// -//
    -//
  • Alterations to the grammar used to generate code may change the -// behavior of the listener calls.
  • -//
  • Alterations to the command line options passed to ANTLR 4 when -// generating the parser may change the behavior of the listener calls.
  • -//
  • Changing the version of the ANTLR Tool used to generate the parser -// may change the behavior of the listener calls.
  • -//
-// -// @param listener the listener to add -// -// @panics nilPointerException if {@code} listener is {@code nil} -// -func (p *BaseParser) AddParseListener(listener ParseTreeListener) { - if listener == nil { - panic("listener") - } - if p.parseListeners == nil { - p.parseListeners = make([]ParseTreeListener, 0) - } - p.parseListeners = append(p.parseListeners, listener) -} - -// -// Remove {@code listener} from the list of parse listeners. -// -//

If {@code listener} is {@code nil} or has not been added as a parse
-// listener, this method does nothing.
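Parse listeners like the ones managed here are typically small structs that receive enter/exit callbacks; they are registered before the parse and removed afterwards. A hedged sketch of that flow, using the method names from this file (EnterEveryRule, ExitEveryRule) on a simplified stand-in interface with reduced signatures:

package main

import "fmt"

// parseListener is a simplified stand-in for the ParseTreeListener interface
// used by AddParseListener/RemoveParseListener.
type parseListener interface {
	EnterEveryRule(rule string)
	ExitEveryRule(rule string)
}

// traceListener prints rule entry and exit, the way a debugging listener might.
type traceListener struct{}

func (traceListener) EnterEveryRule(rule string) { fmt.Println("enter", rule) }
func (traceListener) ExitEveryRule(rule string)  { fmt.Println("exit", rule) }

// removeListener mirrors RemoveParseListener: drop one listener from the slice,
// doing nothing if it was never added.
func removeListener(listeners []parseListener, target parseListener) []parseListener {
	for i, l := range listeners {
		if l == target {
			return append(listeners[:i], listeners[i+1:]...)
		}
	}
	return listeners
}

func main() {
	var listeners []parseListener
	t := traceListener{}
	listeners = append(listeners, t)
	listeners[0].EnterEveryRule("expr")
	listeners[0].ExitEveryRule("expr")
	listeners = removeListener(listeners, t)
	fmt.Println("remaining listeners:", len(listeners))
}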

-// @param listener the listener to remove -// -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -// -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -// -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-// 
- -func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { - - panic("NewParseTreePatternMatcher not implemented!") - // - // if (lexer == nil) { - // if (p.GetTokenStream() != nil) { - // tokenSource := p.GetTokenStream().GetTokenSource() - // if _, ok := tokenSource.(ILexer); ok { - // lexer = tokenSource - // } - // } - // } - // if (lexer == nil) { - // panic("Parser can't discover a lexer to use") - // } - - // m := NewParseTreePatternMatcher(lexer, p) - // return m.compile(pattern, patternRuleIndex) -} - -func (p *BaseParser) GetInputStream() IntStream { - return p.GetTokenStream() -} - -func (p *BaseParser) SetInputStream(input TokenStream) { - p.SetTokenStream(input) -} - -func (p *BaseParser) GetTokenStream() TokenStream { - return p.input -} - -// Set the token stream and reset the parser.// -func (p *BaseParser) SetTokenStream(input TokenStream) { - p.input = nil - p.reset() - p.input = input -} - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. -// -func (p *BaseParser) GetCurrentToken() Token { - return p.input.LT(1) -} - -func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { - if offendingToken == nil { - offendingToken = p.GetCurrentToken() - } - p._SyntaxErrors++ - line := offendingToken.GetLine() - column := offendingToken.GetColumn() - listener := p.GetErrorListenerDispatch() - listener.SyntaxError(p, offendingToken, line, column, msg, err) -} - -func (p *BaseParser) Consume() Token { - o := p.GetCurrentToken() - if o.GetTokenType() != TokenEOF { - p.GetInputStream().Consume() - } - hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 - if p.BuildParseTrees || hasListener { - if p.errHandler.inErrorRecoveryMode(p) { - node := p.ctx.AddErrorNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitErrorNode(node) - } - } - - } else { - node := p.ctx.AddTokenNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitTerminal(node) - } - } - } - // node.invokingState = p.state - } - - return o -} - -func (p *BaseParser) addContextToParseTree() { - // add current context to parent if we have a parent - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) - } -} - -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { - p.SetState(state) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.BuildParseTrees { - p.addContextToParseTree() - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() - } -} - -func (p *BaseParser) ExitRule() { - p.ctx.SetStop(p.input.LT(-1)) - // trigger event on ctx, before it reverts to parent - if p.parseListeners != nil { - p.TriggerExitRuleEvent() - } - p.SetState(p.ctx.GetInvokingState()) - if p.ctx.GetParent() != nil { - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } else { - p.ctx = nil - } -} - -func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { - localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx - // that is previous child of parse tree - if p.BuildParseTrees && p.ctx != localctx { - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() - p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) - } - } - p.ctx = localctx -} - -// Get the precedence level for the top-most precedence rule. 
-// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -func (p *BaseParser) GetPrecedence() int { - if len(p.precedenceStack) == 0 { - return -1 - } - - return p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { - p.SetState(state) - p.precedenceStack.Push(precedence) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -// -// Like {@link //EnterRule} but for recursive rules. - -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { - previous := p.ctx - previous.SetParent(localctx) - previous.SetInvokingState(state) - previous.SetStop(p.input.LT(-1)) - - p.ctx = localctx - p.ctx.SetStart(previous.GetStart()) - if p.BuildParseTrees { - p.ctx.AddChild(previous) - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() - p.ctx.SetStop(p.input.LT(-1)) - retCtx := p.ctx // save current ctx (return value) - // unroll so ctx is as it was before call to recursive method - if p.parseListeners != nil { - for p.ctx != parentCtx { - p.TriggerExitRuleEvent() - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } - } else { - p.ctx = parentCtx - } - // hook into tree - retCtx.SetParent(parentCtx) - if p.BuildParseTrees && parentCtx != nil { - // add return ctx into invoking rule's tree - parentCtx.AddChild(retCtx) - } -} - -func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { - ctx := p.ctx - for ctx != nil { - if ctx.GetRuleIndex() == ruleIndex { - return ctx - } - ctx = ctx.GetParent().(ParserRuleContext) - } - return nil -} - -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { - return precedence >= p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) inContext(context ParserRuleContext) bool { - // TODO: useful in parser? - return false -} - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
-// return getExpectedTokens().contains(symbol)
-// 
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -// -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.numStates() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. 
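GetRuleInvocationStack above is just a parent-pointer walk that collects rule names from the innermost context outwards, which is what makes it handy in error messages. A simplified sketch of the same walk over stand-in context types (not the ParserRuleContext API):

package main

import "fmt"

// ruleCtx is a stand-in for ParserRuleContext: a rule index plus a parent link.
type ruleCtx struct {
	ruleIndex int
	parent    *ruleCtx
}

// invocationStack walks parents from the innermost rule outwards, collecting
// rule names (or "n/a" when the index is unknown), like GetRuleInvocationStack.
func invocationStack(c *ruleCtx, ruleNames []string) []string {
	var stack []string
	for c != nil {
		if c.ruleIndex < 0 || c.ruleIndex >= len(ruleNames) {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, ruleNames[c.ruleIndex])
		}
		c = c.parent
	}
	return stack
}

func main() {
	names := []string{"prog", "stat", "expr"}
	leaf := &ruleCtx{ruleIndex: 2, parent: &ruleCtx{ruleIndex: 1, parent: &ruleCtx{ruleIndex: 0}}}
	fmt.Println(invocationStack(leaf, names)) // [expr stat prog]
}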
-// -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go deleted file mode 100644 index 128b9a96d4..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go +++ /dev/null @@ -1,1473 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorListATNDecisions = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - if dfa.precedenceDfa { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. 
- s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0 - } - - if s0 == nil { - if outerContext == nil { - outerContext = RuleContextEmpty - } - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - // If p is not a precedence DFA, we check the ATN start state - // to determine if p ATN start state is the decision for the - // closure block that determines whether a precedence rule - // should continue or complete. - - t2 := dfa.atnStartState - t, ok := t2.(*StarLoopEntryState) - if !dfa.precedenceDfa && ok { - if t.precedenceRuleDecision { - dfa.setPrecedenceDfa(true) - } - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) - - if dfa.precedenceDfa { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.s0 = s0 - } - } - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) 
-// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -// -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - if alts.length() == 0 { - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - } else if alts.length() == 1 { - return alts.minValue() - } else { - // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts 
is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - - panic("Should not have reached p state") -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - edges := previousD.edges - if edges == nil { - return nil - } - - return edges[t+1] -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. - -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. 
- nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } else if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. 
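The break just below fires once the reach set predicts exactly one alternative. A minimal sketch of that uniqueness check, mirroring the shape of getUniqueAlt further down in this file, with toy types and the assumption that ATNInvalidAltNumber is 0:

package main

import "fmt"

// invalidAlt stands in for ATNInvalidAltNumber (assumed to be 0 here).
const invalidAlt = 0

// toyConfig is a stripped-down (state, alt) pair used only for illustration.
type toyConfig struct{ state, alt int }

// uniqueAlt returns the one alternative shared by all configurations, or
// invalidAlt as soon as two configurations disagree.
func uniqueAlt(configs []toyConfig) int {
	alt := invalidAlt
	for _, c := range configs {
		if alt == invalidAlt {
			alt = c.alt // first alternative seen
		} else if c.alt != alt {
			return invalidAlt // conflict: more than one viable alternative
		}
	}
	return alt
}

func main() {
	fmt.Println(uniqueAlt([]toyConfig{{12, 1}, {13, 1}})) // 1: prediction is done
	fmt.Println(uniqueAlt([]toyConfig{{12, 1}, {12, 2}})) // 0: keep looking
}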
- if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if ParserATNSimulatorDebug { - fmt.Println("in computeReachSet, starting closure: " + closure.String()) - } - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var SkippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - _, ok := c.GetState().(*RuleStopState) - - if ok { - if fullCtx || t == TokenEOF { - if SkippedStopStates == nil { - SkippedStopStates = make([]*BaseATNConfig, 0) - } - SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for j := 0; j < len(c.GetState().GetTransitions()); j++ { - trans := c.GetState().GetTransitions()[j] - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - // Now figure out where the reach operation can take us... 
- var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. - // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if SkippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewSet(nil, nil) - treatEOFAsEpsilon := t == TokenEOF - for k := 0; k < len(intermediate.configs); k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(SkippedStopStates); l++ { - reach.Add(SkippedStopStates[l], p.mergeCache) - } - } - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//
When {@code lookToEndOfRule} is true, this method uses -// {@link ATN//NextTokens} for each configuration in {@code configs} which is -// not already in a rule stop state to see if a rule stop state is reachable -// from the configuration via epsilon-only transitions.
-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -// -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - - _, ok := config.GetState().(*RuleStopState) - - if ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewSet(nil, nil) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//
-// 1. Evaluate the precedence predicates for each configuration using
-// {@link SemanticContext//evalPrecedence}.
-// 2. Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context. This transformation is
-// valid for the following reasons:
-//    - The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-//    - The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
-// The prediction context must be considered by this filter to address
-// situations like the following:
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-// In the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement})
-// from being eliminated by the filter.
-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -// -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i < nalts+1; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. -// -//
-// The default implementation of this method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.
-//
-//   - If a syntactically valid path or paths reach the end of the decision
-// rule and they are semantically valid if predicated, return the min
-// associated alt.
-//   - Else, if a semantically invalid but syntactically valid path or paths
-// exist, return the minimum associated alt.
-//   - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, this could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// {@link //AdaptivePredict} instead of panicking with a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -// -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
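A self-contained sketch of the predicate/alt-pair walk described above, with plain closures standing in for semantic contexts; the names are illustrative only:

package main

import "fmt"

// predPair is an illustrative (predicate, alternative) pair; a nil predicate
// plays the role of SemanticContextNone, i.e. "always true".
type predPair struct {
	pred func() bool
	alt  int
}

// winningAlts follows the shape described above: collect every alternative
// whose predicate passes (or that is unpredicated), stopping at the first
// winner when complete is false.
func winningAlts(pairs []predPair, complete bool) map[int]bool {
	wins := map[int]bool{}
	for _, p := range pairs {
		if p.pred == nil || p.pred() {
			wins[p.alt] = true
			if !complete {
				break
			}
		}
	}
	return wins
}

func main() {
	pairs := []predPair{
		{pred: func() bool { return false }, alt: 1}, // failing predicate
		{pred: nil, alt: 2},                          // unpredicated: always wins
		{pred: func() bool { return true }, alt: 3},  // passing predicate
	}
	fmt.Println(winningAlts(pairs, true))  // map[2:true 3:true]
	fmt.Println(winningAlts(pairs, false)) // map[2:true]: stop at first winner
}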
-// -func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - - if ParserATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") - fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - _, ok := config.GetState().(*RuleStopState) - if ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
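The "pop" performed above when closure reaches a rule stop state can be read as walking up a linked stack of return states. A toy sketch under that reading, using illustrative names rather than the runtime's PredictionContext types:

package main

import "fmt"

// toyCtx is an illustrative singleton prediction context: a linked list of
// return states, where a nil parent stands for the empty ($) context.
type toyCtx struct {
	returnState int
	parent      *toyCtx
}

// push records the state to return to after invoking a rule; "popping" on a
// rule stop state is simply continuing with the parent, as the closure above
// does via GetParent(i).
func push(parent *toyCtx, returnState int) *toyCtx {
	return &toyCtx{returnState: returnState, parent: parent}
}

func main() {
	// Two nested rule invocations, e.g. prog -> statement -> letterA.
	c := push(push(nil, 17), 42)
	for c != nil {
		fmt.Println("return to state", c.returnState) // 42, then 17
		c = c.parent                                  // pop on rule stop
	}
	fmt.Println("back at the start rule ($)")
}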
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - if !t.getIsEpsilon() && closureBusy.add(c) != c { - // avoid infinite recursion for EOF* and EOF+ - continue - } - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if closureBusy.add(c) != c { - // avoid infinite recursion for right-recursive rules - continue - } - - if p.dfa != nil && p.dfa.precedenceDfa { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - configs.SetDipsIntoOuterContext(true) // TODO: can remove? 
only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - - return "" -} - -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { - - switch t.getSerializationType() { - case TransitionRULE: - return p.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return p.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) - case TransitionATOM: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionRANGE: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } -} - -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) - } - return NewBaseATNConfig4(config, t.getTarget()) -} - -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
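The lines that follow rewind the input to the decision start before evaluating the predicate and then restore the position. A self-contained sketch of that save/seek/restore pattern with a toy stream; the names are illustrative:

package main

import "fmt"

// toyStream is a minimal seekable stream; its index is the only state that
// the predicate below depends on, mimicking a position-sensitive predicate.
type toyStream struct{ index int }

func (s *toyStream) Index() int { return s.index }
func (s *toyStream) Seek(i int) { s.index = i }

// evalAtStart mirrors the save/seek/evaluate/restore pattern used in the
// full-context branch: rewind to the decision start, evaluate, then put the
// stream back where it was so the ongoing simulation is unaffected.
func evalAtStart(s *toyStream, startIndex int, pred func(*toyStream) bool) bool {
	saved := s.Index()
	s.Seek(startIndex)
	ok := pred(s)
	s.Seek(saved)
	return ok
}

func main() {
	s := &toyStream{index: 7}
	atStart := func(st *toyStream) bool { return st.Index() == 0 }
	fmt.Println(evalAtStart(s, 0, atStart)) // true: evaluated at the start
	fmt.Println(s.Index())                  // 7: position restored afterwards
}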
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) - } - returnState := t.followState - newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) -} - -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) -} - -// Sam pointed out a problem with the previous definition, v3, of -// ambiguous states. If we have another state associated with conflicting -// alternatives, we should keep going. For example, the following grammar -// -// s : (ID | ID ID?) '' -// -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. -// The key is that we have a single state that has config's only associated -// with a single alternative, 2, and crucially the state transitions -// among the configurations are all non-epsilon transitions. That means -// we don't consider any conflicts that include alternative 2. 
So, we -// ignore the conflict between alts 1 and 2. We ignore a set of -// conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. -// -// It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: -// -// a : A | A | A B -// -// After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned -// with states associated with the conflicting alternatives. Here alt -// 3 is not associated with the conflicting configs, but since we can continue -// looking for input reasonably, I don't declare the state done. We -// ignore a set of conflicting alts when we have an alternative -// that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) - } else { - conflictingAlts = configs.GetConflictingAlts() - } - return conflictingAlts -} - -func (p *ParserATNSimulator) GetTokenName(t int) string { - if t == TokenEOF { - return "EOF" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil { - if t >= len(p.parser.GetLiteralNames()) { - fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ",")) - // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect - } else { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - } - - return strconv.Itoa(t) -} - -func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return p.GetTokenName(input.LA(1)) -} - -// Used for debugging in AdaptivePredict around execATN but I cut -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. -// -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - - panic("Not implemented") - - // fmt.Println("dead end configs: ") - // var decs = nvae.deadEndConfigs - // - // for i:=0; i0) { - // var t = c.state.GetTransitions()[0] - // if t2, ok := t.(*AtomTransition); ok { - // trans = "Atom "+ p.GetTokenName(t2.label) - // } else if t3, ok := t.(SetTransition); ok { - // _, ok := t.(*NotSetTransition) - // - // var s string - // if (ok){ - // s = "~" - // } - // - // trans = s + "Set " + t3.set - // } - // } - // fmt.Errorf(c.String(p.parser, true) + ":" + trans) - // } -} - -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) -} - -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { - alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { - if alt == ATNInvalidAltNumber { - alt = c.GetAlt() // found first alt - } else if c.GetAlt() != alt { - return ATNInvalidAltNumber - } - } - return alt -} - -// -// Add an edge to the DFA, if possible. 
This method calls -// {@link //addDFAState} to ensure the {@code to} state is present in the -// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the -// range of edges that can be represented in the DFA tables, this method -// returns without adding the edge to the DFA. -// -// If {@code to} is {@code nil}, this method returns {@code nil}. -// Otherwise, this method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.
-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -// -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - to = p.addDFAState(dfa, to) // used existing if possible not incoming - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - if from.edges == nil { - from.edges = make([]*DFAState, p.atn.maxTokenType+1+1) - } - from.edges[t+1] = to // connect - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//
If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and -// does not change the DFA.
-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -// -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - hash := d.hash() - existing, ok := dfa.getState(hash) - if ok { - return existing - } - d.stateNumber = dfa.numStates() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.setState(hash, d) - if ParserATNSimulatorDebug { - fmt.Println("adding NewDFA state: " + d.String()) - } - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go deleted file mode 100644 index 49cd10c5ff..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "reflect" - "strconv" -) - -type ParserRuleContext interface { - RuleContext - - SetException(RecognitionException) - - AddTokenNode(token Token) *TerminalNodeImpl - AddErrorNode(badToken Token) *ErrorNodeImpl - - EnterRule(listener ParseTreeListener) - ExitRule(listener ParseTreeListener) - - SetStart(Token) - GetStart() Token - - SetStop(Token) - GetStop() Token - - AddChild(child RuleContext) RuleContext - RemoveLastChild() -} - -type BaseParserRuleContext struct { - *BaseRuleContext - - start, stop Token - exception RecognitionException - children []Tree -} - -func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { - prc := new(BaseParserRuleContext) - - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = -1 - // * If we are debugging or building a parse tree for a Visitor, - // we need to track all of the tokens and rule invocations associated - // with prc rule's context. This is empty for parsing w/o tree constr. - // operation because we don't the need to track the details about - // how we parse prc rule. - // / - prc.children = nil - prc.start = nil - prc.stop = nil - // The exception that forced prc rule to return. If the rule successfully - // completed, prc is {@code nil}. - prc.exception = nil - - return prc -} - -func (prc *BaseParserRuleContext) SetException(e RecognitionException) { - prc.exception = e -} - -func (prc *BaseParserRuleContext) GetChildren() []Tree { - return prc.children -} - -func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { - // from RuleContext - prc.parentCtx = ctx.parentCtx - prc.invokingState = ctx.invokingState - prc.children = nil - prc.start = ctx.start - prc.stop = ctx.stop -} - -func (prc *BaseParserRuleContext) GetText() string { - if prc.GetChildCount() == 0 { - return "" - } - - var s string - for _, child := range prc.children { - s += child.(ParseTree).GetText() - } - - return s -} - -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { -} - -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { -} - -// * Does not set parent link other add methods do that/// -func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. 
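The child bookkeeping above (AddChild, GetText) amounts to an ordered tree whose text is the concatenation of its leaves. A toy sketch of that idea, with illustrative names rather than the runtime's interfaces:

package main

import "fmt"

// toyNode is an illustrative stand-in for a rule context: leaves carry token
// text, interior nodes own an ordered child list, and the text of a node is
// the concatenation of its children's text.
type toyNode struct {
	text     string // set only on leaves (token nodes)
	children []*toyNode
}

func (n *toyNode) addChild(c *toyNode) *toyNode {
	n.children = append(n.children, c)
	return c
}

func (n *toyNode) getText() string {
	if len(n.children) == 0 {
		return n.text // a leaf, or an empty rule context ("")
	}
	var s string
	for _, c := range n.children {
		s += c.getText()
	}
	return s
}

func main() {
	expr := &toyNode{}
	expr.addChild(&toyNode{text: "1"})
	expr.addChild(&toyNode{text: "+"})
	expr.addChild(&toyNode{text: "2"})
	fmt.Println(expr.getText()) // "1+2"
}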
-// / -func (prc *BaseParserRuleContext) RemoveLastChild() { - if prc.children != nil && len(prc.children) > 0 { - prc.children = prc.children[0 : len(prc.children)-1] - } -} - -func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - - node := NewTerminalNodeImpl(token) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node - -} - -func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { - node := NewErrorNodeImpl(badToken) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node -} - -func (prc *BaseParserRuleContext) GetChild(i int) Tree { - if prc.children != nil && len(prc.children) >= i { - return prc.children[i] - } - - return nil -} - -func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { - if childType == nil { - return prc.GetChild(i).(RuleContext) - } - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if reflect.TypeOf(child) == childType { - if i == 0 { - return child.(RuleContext) - } - - i-- - } - } - - return nil -} - -func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { - return TreesStringTree(prc, ruleNames, recog) -} - -func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { - return prc -} - -func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { - return visitor.VisitChildren(prc) -} - -func (prc *BaseParserRuleContext) SetStart(t Token) { - prc.start = t -} - -func (prc *BaseParserRuleContext) GetStart() Token { - return prc.start -} - -func (prc *BaseParserRuleContext) SetStop(t Token) { - prc.stop = t -} - -func (prc *BaseParserRuleContext) GetStop() Token { - return prc.stop -} - -func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if c2, ok := child.(TerminalNode); ok { - if c2.GetSymbol().GetTokenType() == ttype { - if i == 0 { - return c2 - } - - i-- - } - } - } - return nil -} - -func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { - if prc.children == nil { - return make([]TerminalNode, 0) - } - - tokens := make([]TerminalNode, 0) - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if tchild, ok := child.(TerminalNode); ok { - if tchild.GetSymbol().GetTokenType() == ttype { - tokens = append(tokens, tchild) - } - } - } - - return tokens -} - -func (prc *BaseParserRuleContext) GetPayload() interface{} { - return prc -} - -func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { - if prc.children == nil || i < 0 || i >= len(prc.children) { - return nil - } - - j := -1 // what element have we found with ctxType? 
- for _, o := range prc.children { - - childType := reflect.TypeOf(o) - - if childType.Implements(ctxType) { - j++ - if j == i { - return o.(RuleContext) - } - } - } - return nil -} - -// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do -// check for convertibility - -func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { - return prc.getChild(ctxType, i) -} - -func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { - if prc.children == nil { - return make([]RuleContext, 0) - } - - contexts := make([]RuleContext, 0) - - for _, child := range prc.children { - childType := reflect.TypeOf(child) - - if childType.ConvertibleTo(ctxType) { - contexts = append(contexts, child.(RuleContext)) - } - } - return contexts -} - -func (prc *BaseParserRuleContext) GetChildCount() int { - if prc.children == nil { - return 0 - } - - return len(prc.children) -} - -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { - if prc.start == nil || prc.stop == nil { - return TreeInvalidInterval - } - - return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) -} - -//need to manage circular dependencies, so export now - -// Print out a whole tree, not just a node, in LISP format -// (root child1 .. childN). Print just a node if b is a leaf. -// - -func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { - - var p ParserRuleContext = prc - s := "[" - for p != nil && p != stop { - if ruleNames == nil { - if !p.IsEmpty() { - s += strconv.Itoa(p.GetInvokingState()) - } - } else { - ri := p.GetRuleIndex() - var ruleName string - if ri >= 0 && ri < len(ruleNames) { - ruleName = ruleNames[ri] - } else { - ruleName = strconv.Itoa(ri) - } - s += ruleName - } - if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { - s += " " - } - pi := p.GetParent() - if pi != nil { - p = pi.(ParserRuleContext) - } else { - p = nil - } - } - s += "]" - return s -} - -var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) - -type InterpreterRuleContext interface { - ParserRuleContext -} - -type BaseInterpreterRuleContext struct { - *BaseParserRuleContext -} - -func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - - prc := new(BaseInterpreterRuleContext) - - prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = ruleIndex - - return prc -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go deleted file mode 100644 index 99acb333fa..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. 
-// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - hash() int - GetParent(int) PredictionContext - getReturnState(int) int - equals(PredictionContext) bool - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -func calculateEmptyHash() int { - h := murmurInit(1) - return murmurFinish(h, 0) -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. -// -func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(37) - - if parent != nil { - s.cachedHash = calculateHash(parent, returnState) - } else { - s.cachedHash = calculateEmptyHash() - } - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { - if b == other { - return true - } else if _, ok := 
other.(*BaseSingletonPredictionContext); !ok { - return false - } else if b.hash() != other.hash() { - return false // can't be same if hash is different - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != other.getReturnState(0) { - return false - } else if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) hash() int { - h := murmurInit(1) - - if b.parentCtx == nil { - return murmurFinish(h, 0) - } - - h = murmurUpdate(h, b.parentCtx.hash()) - h = murmurUpdate(h, b.returnState) - return murmurFinish(h, 2) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) equals(other PredictionContext) bool { - return e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. 
- - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(37) - - for i := range parents { - c.cachedHash += calculateHash(parents[i], returnStates[i]) - } - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -func (a *ArrayPredictionContext) equals(other PredictionContext) bool { - if _, ok := other.(*ArrayPredictionContext); !ok { - return false - } else if a.cachedHash != other.hash() { - return false // can't be same if hash is different - } else { - otherP := other.(*ArrayPredictionContext) - return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents - } -} - -func (a *ArrayPredictionContext) hash() int { - h := murmurInit(1) - - for _, p := range a.parents { - h = murmurUpdate(h, p.hash()) - } - - for _, r := range a.returnStates { - h = murmurUpdate(h, r) - } - - return murmurFinish(h, 2 * len(a.parents)) -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = RuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - // share same graph if both same - if a == b { - return a - } - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - // convert singleton so both are arrays to normalize - if _, ok := a.(*BaseSingletonPredictionContext); ok { - a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } - if _, ok := b.(*BaseSingletonPredictionContext); ok { - b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } - return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) -} - -// -// Merge two {@link SingletonBasePredictionContext} instances. -// -//

-// Stack tops equal, parents merge is same: return left graph.
-//
-// Same stack top, parents differ: merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
-// Different stack tops pointing to same parent: make array node for the
-// root where both elements in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents: make array node for
-// the root where each element points to the corresponding original
-// parent.
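// Illustrative sketch (not part of the vendored file): how the "same parent,
// different stack tops" case described above falls out of merge/mergeSingletons.
// root is any previously built, non-nil parent context; return states 7 and 9
// are arbitrary.
func exampleSingletonMerge(root PredictionContext) PredictionContext {
	ax := SingletonBasePredictionContextCreate(root, 7) // stack top 7 over root
	ay := SingletonBasePredictionContextCreate(root, 9) // stack top 9, same parent

	// rootIsWildcard=true requests a local-context merge; because the parents
	// are identical, the result is a single array node with the sorted
	// payloads [7, 9] sharing the common parent.
	return merge(ax, ay, true, nil)
}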
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.hash(), a.hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) - } - return apc -} - -// -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY}; return left graph.
-//
-// Special case of last merge if local context.
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// nil parent).
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// -// Merge two {@link ArrayBasePredictionContext} instances. -// -//

-// Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.hash(), a.hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - if M == a { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), a) - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), b) - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), M) - } - return M -} - -// -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. 
-// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go deleted file mode 100644 index 15718f912b..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //

- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //

- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are ambiguous.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //

- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeLLExactAmbigDetection = 2 -) - -// -// Computes the SLL prediction termination condition. -// -//

-// This method computes the SLL prediction termination condition for both of
-// the following cases:
-//
-//   • The usual SLL+LL fallback upon SLL conflict
-//   • Pure SLL without LL fallback
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
-//
-// When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also lets us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts, so we might see two configurations like the
-// following:
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations:
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
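// Illustrative sketch (not part of the vendored file), using the DFA-state
// example above, {@code [12|1|[], 6|2|[], 12|2|[]]}: grouped per (state, ctx)
// pair, the alternative subsets are {1,2} for state 12 and {2} for state 6.
// The real termination check below pairs hasConflictingAltSet with
// hasStateAssociatedWithOneAlt over the configuration set; this
// self-contained approximation uses the subset-cardinality helper instead.
func exampleSLLHeuristic() bool {
	state12 := NewBitSet()
	state12.add(1)
	state12.add(2) // alternatives 1 and 2 conflict at state 12

	state6 := NewBitSet()
	state6.add(2) // state 6 is associated with a single alternative

	altsets := []*BitSet{state12, state6}
	conflict := PredictionModehasConflictingAltSet(altsets)     // true
	singleAlt := PredictionModehasNonConflictingAltSet(altsets) // true
	return conflict && !singleAlt // false: keep looking ahead
}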
-// -func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// -// Full LL prediction termination. -// -//

-// Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more lookahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C} into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but a different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity, but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if the union of resolved alternative sets from non-conflicting
-// and conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-//   • no conflicts and more than 1 alternative in set => continue
-//
-//   • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
-//     {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
-//     {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-//     {@code {1,3}} => continue
-//
-//   • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-//     {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
-//     {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-//     {@code {1}} => stop and predict 1
-//
-//   • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-//     {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
-//     {@code {1}} = {@code {1}} => stop and predict 1, can announce
-//     ambiguity {@code {1,2}}
-//
-//   • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
-//     {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
-//     {@code {2}} = {@code {1,2}} => continue
-//
-//   • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
-//     {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
-//     {@code {3}} = {@code {1,3}} => continue
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set:
-//
-// |A_i|>1 and A_i = A_j for all i, j.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
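// Illustrative sketch (not part of the vendored file): roughly how the ATN
// simulator combines the helpers defined below to implement the stopping
// rules described above. Regular full LL stops once every conflicting subset
// resolves to the same single alternative; exact-ambiguity mode additionally
// requires all subsets to conflict and to be identical.
func exampleFullLLStop(altsets []*BitSet) (stopLL, stopExact bool) {
	stopLL = PredictionModeresolvesToJustOneViableAlt(altsets) != ATNInvalidAltNumber
	stopExact = PredictionModeallSubsetsConflict(altsets) &&
		PredictionModeallSubsetsEqual(altsets)
	return stopLL, stopExact
}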
-// -func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -// -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -// -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -// -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// -// This func gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-// -func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) - - for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() - - alts, ok := configToAlts[key] - if !ok { - alts = NewBitSet() - configToAlts[key] = alts - } - alts.add(c.GetAlt()) - } - - values := make([]*BitSet, 0, 10) - for _, v := range configToAlts { - values = append(values, v) - } - return values -} - -// -// Get a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// 
-// -func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go deleted file mode 100644 index d114800f42..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.8" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not provide a list of token names.") -// } -// var 
result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -//

-// Used for XPath and tree pattern compilation.
-// -func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map result = tokenTypeMapCache.Get(vocabulary) -// if (result == null) { -// result = new HashMap() -// for (int i = 0; i < GetATN().maxTokenType; i++) { -// String literalName = vocabulary.getLiteralName(i) -// if (literalName != null) { -// result.put(literalName, i) -// } -// -// String symbolicName = vocabulary.GetSymbolicName(i) -// if (symbolicName != null) { -// result.put(symbolicName, i) -// } -// } -// -// result.put("EOF", Token.EOF) -// result = Collections.unmodifiableMap(result) -// tokenTypeMapCache.put(vocabulary, result) -// } -// -// return result -// } -//} - -// What is the error header, normally line/character position information?// -func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { - line := e.GetOffendingToken().GetLine() - column := e.GetOffendingToken().GetColumn() - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
-// -func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - - return "'" + s + "'" -} - -func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { - return NewProxyErrorListener(b.listeners) -} - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { - return true -} - -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { - return true -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go deleted file mode 100644 index 600cf8c062..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go deleted file mode 100644 index 49205a1624..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. -// -//

-// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
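// Illustrative sketch (not part of the vendored file): combining predicates
// with the constructors and combinators defined below. The rule and
// predicate indexes are arbitrary.
func exampleCombinePredicates() (SemanticContext, SemanticContext) {
	p1 := NewPredicate(3, 0, false)
	p2 := NewPredicate(4, 1, false)

	both := SemanticContextandContext(p1, p2) // an AND node: {3:0}? && {4:1}?
	// SemanticContextNone behaves like "always true", so OR-ing with it
	// collapses to SemanticContextNone.
	either := SemanticContextorContext(p1, SemanticContextNone)
	return both, either
}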
-// - -type SemanticContext interface { - comparable - - evaluate(parser Recognizer, outerContext RuleContext) bool - evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext - - hash() int - String() string -} - -func SemanticContextandContext(a, b SemanticContext) SemanticContext { - if a == nil || a == SemanticContextNone { - return b - } - if b == nil || b == SemanticContextNone { - return a - } - result := NewAND(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -func SemanticContextorContext(a, b SemanticContext) SemanticContext { - if a == nil { - return b - } - if b == nil { - return a - } - if a == SemanticContextNone || b == SemanticContextNone { - return SemanticContextNone - } - result := NewOR(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -type Predicate struct { - ruleIndex int - predIndex int - isCtxDependent bool -} - -func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { - p := new(Predicate) - - p.ruleIndex = ruleIndex - p.predIndex = predIndex - p.isCtxDependent = isCtxDependent // e.g., $i ref in pred - return p -} - -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. - -var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) - -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - return p -} - -func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - - var localctx RuleContext - - if p.isCtxDependent { - localctx = outerContext - } - - return parser.Sempred(localctx, p.ruleIndex, p.predIndex) -} - -func (p *Predicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*Predicate); !ok { - return false - } else { - return p.ruleIndex == other.(*Predicate).ruleIndex && - p.predIndex == other.(*Predicate).predIndex && - p.isCtxDependent == other.(*Predicate).isCtxDependent - } -} - -func (p *Predicate) hash() int { - return p.ruleIndex*43 + p.predIndex*47 -} - -func (p *Predicate) String() string { - return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" -} - -type PrecedencePredicate struct { - precedence int -} - -func NewPrecedencePredicate(precedence int) *PrecedencePredicate { - - p := new(PrecedencePredicate) - p.precedence = precedence - - return p -} - -func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - return parser.Precpred(outerContext, p.precedence) -} - -func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - if parser.Precpred(outerContext, p.precedence) { - return SemanticContextNone - } - - return nil -} - -func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { - return p.precedence - other.precedence -} - -func (p *PrecedencePredicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*PrecedencePredicate); !ok { - return false - } else { - return p.precedence == other.(*PrecedencePredicate).precedence - } -} - -func (p *PrecedencePredicate) hash() int { - return p.precedence * 51 -} - -func (p *PrecedencePredicate) String() string { - return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
-} - -func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { - result := make([]*PrecedencePredicate, 0) - - for _, v := range set.values() { - if c2, ok := v.(*PrecedencePredicate); ok { - result = append(result, c2) - } - } - - return result -} - -// A semantic context which is true whenever none of the contained contexts -// is false.` - -type AND struct { - opnds []SemanticContext -} - -func NewAND(a, b SemanticContext) *AND { - - operands := NewSet(nil, nil) - if aa, ok := a.(*AND); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*AND); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence < reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - and := new(AND) - and.opnds = opnds - - return and -} - -func (a *AND) equals(other interface{}) bool { - if a == other { - return true - } else if _, ok := other.(*AND); !ok { - return false - } else { - for i, v := range other.(*AND).opnds { - if !a.opnds[i].equals(v) { - return false - } - } - return true - } -} - -// -// {@inheritDoc} -// -//

-// The evaluation of predicates by a context is short-circuiting, but -// unordered.

-// -func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(a.opnds); i++ { - if !a.opnds[i].evaluate(parser, outerContext) { - return false - } - } - return true -} - -func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - - for i := 0; i < len(a.opnds); i++ { - context := a.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == nil { - // The AND context is false if any element is false - return nil - } else if evaluated != SemanticContextNone { - // Reduce the result by Skipping true elements - operands = append(operands, evaluated) - } - } - if !differs { - return a - } - - if len(operands) == 0 { - // all elements were true, so the AND context is true - return SemanticContextNone - } - - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextandContext(result, o) - } - } - - return result -} - -func (a *AND) hash() int { - h := murmurInit(37) // Init with a value different from OR - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *OR) hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *AND) String() string { - s := "" - - for _, o := range a.opnds { - s += "&& " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} - -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. -// - -type OR struct { - opnds []SemanticContext -} - -func NewOR(a, b SemanticContext) *OR { - - operands := NewSet(nil, nil) - if aa, ok := a.(*OR); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*OR); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence > reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - o := new(OR) - o.opnds = opnds - - return o -} - -func (o *OR) equals(other interface{}) bool { - if o == other { - return true - } else if _, ok := other.(*OR); !ok { - return false - } else { - for i, v := range other.(*OR).opnds { - if !o.opnds[i].equals(v) { - return false - } - } - return true - } -} - -//

-// The evaluation of predicates by o context is short-circuiting, but -// unordered.

-// -func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(o.opnds); i++ { - if o.opnds[i].evaluate(parser, outerContext) { - return true - } - } - return false -} - -func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - for i := 0; i < len(o.opnds); i++ { - context := o.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == SemanticContextNone { - // The OR context is true if any element is true - return SemanticContextNone - } else if evaluated != nil { - // Reduce the result by Skipping false elements - operands = append(operands, evaluated) - } - } - if !differs { - return o - } - if len(operands) == 0 { - // all elements were false, so the OR context is false - return nil - } - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextorContext(result, o) - } - } - - return result -} - -func (o *OR) String() string { - s := "" - - for _, o := range o.opnds { - s += "|| " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go deleted file mode 100644 index 2d8e99095d..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type TokenSourceCharStreamPair struct { - tokenSource TokenSource - charStream CharStream -} - -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. - -type Token interface { - GetSource() *TokenSourceCharStreamPair - GetTokenType() int - GetChannel() int - GetStart() int - GetStop() int - GetLine() int - GetColumn() int - - GetText() string - SetText(s string) - - GetTokenIndex() int - SetTokenIndex(v int) - - GetTokenSource() TokenSource - GetInputStream() CharStream -} - -type BaseToken struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - text string // text of the token. - readOnly bool -} - -const ( - TokenInvalidType = 0 - - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. - TokenEpsilon = -2 - - TokenMinUserTokenType = 1 - - TokenEOF = -1 - - // All tokens go to the parser (unless Skip() is called in that rule) - // on a particular "channel". The parser tunes to a particular channel - // so that whitespace etc... can go to the parser on a "hidden" channel. - - TokenDefaultChannel = 0 - - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. 
- - TokenHiddenChannel = 1 -) - -func (b *BaseToken) GetChannel() int { - return b.channel -} - -func (b *BaseToken) GetStart() int { - return b.start -} - -func (b *BaseToken) GetStop() int { - return b.stop -} - -func (b *BaseToken) GetLine() int { - return b.line -} - -func (b *BaseToken) GetColumn() int { - return b.column -} - -func (b *BaseToken) GetTokenType() int { - return b.tokenType -} - -func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { - return b.source -} - -func (b *BaseToken) GetTokenIndex() int { - return b.tokenIndex -} - -func (b *BaseToken) SetTokenIndex(v int) { - b.tokenIndex = v -} - -func (b *BaseToken) GetTokenSource() TokenSource { - return b.source.tokenSource -} - -func (b *BaseToken) GetInputStream() CharStream { - return b.source.charStream -} - -type CommonToken struct { - *BaseToken -} - -func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 - if t.source.tokenSource != nil { - t.line = source.tokenSource.GetLine() - t.column = source.tokenSource.GetCharPositionInLine() - } else { - t.column = -1 - } - return t -} - -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. - -//CommonToken.EMPTY_SOURCE = [ nil, nil ] - -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. -// -//

-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.

-// -// @param oldToken The token to copy. -// -func (c *CommonToken) clone() *CommonToken { - t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) - t.tokenIndex = c.GetTokenIndex() - t.line = c.GetLine() - t.column = c.GetColumn() - t.text = c.GetText() - return t -} - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go deleted file mode 100644 index e023978fef..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenSource interface { - NextToken() Token - Skip() - More() - GetLine() int - GetCharPositionInLine() int - GetInputStream() CharStream - GetSourceName() string - setTokenFactory(factory TokenFactory) - GetTokenFactory() TokenFactory -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go deleted file mode 100644 index df92c81478..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenStream interface { - IntStream - - LT(k int) Token - - Get(index int) Token - GetTokenSource() TokenSource - SetTokenSource(TokenSource) - - GetAllText() string - GetTextFromInterval(*Interval) string - GetTextFromRuleContext(RuleContext) string - GetTextFromTokens(Token, Token) string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go deleted file mode 100644 index 96a03f02aa..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go +++ /dev/null @@ -1,649 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. -package antlr - -import ( -"bytes" -"fmt" -) - - -// -// Useful for rewriting out a buffered input token stream after doing some -// augmentation or other manipulations on it. - -//

-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)
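The lazy-instruction model described above can be illustrated with the Go API removed in this file. A hedged sketch (the token indexes are placeholders; a real TokenStream would come from a CommonTokenStream over a lexer):

```go
package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// renderWithEdits queues edits lazily; nothing touches the token buffer until
// GetTextDefault walks it and applies the queued instructions.
func renderWithEdits(tokens antlr.TokenStream) string {
	rewriter := antlr.NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// generated\n") // queued, not applied yet
	rewriter.ReplaceDefaultPos(3, "renamed")          // still only an instruction
	fmt.Println(tokens.GetAllText())                  // original text, unchanged
	return rewriter.GetTextDefault()                  // instructions applied here
}
```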

-//

- -// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.

- -//

-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.

- -//

-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.

- -//

-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,

- -//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-// 
- -//

-// Then in the rules, you can execute (assuming rewriter is visible):

- -//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-// 
- -//

-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:

- -//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-// 
- -//

-// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.

- - - -const( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation)GetInstructionIndex() int{ - return op.instruction_index -} - -func (op *BaseRewriteOperation)GetIndex() int{ - return op.index -} - -func (op *BaseRewriteOperation)GetText() string{ - return op.text -} - -func (op *BaseRewriteOperation)GetOpName() string{ - return op.op_name -} - -func (op *BaseRewriteOperation)GetTokens() TokenStream{ - return op.tokens -} - -func (op *BaseRewriteOperation)SetInstructionIndex(val int){ - op.instruction_index = val -} - -func (op *BaseRewriteOperation)SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation)SetText(val string){ - op.text = val -} - -func (op *BaseRewriteOperation)SetOpName(val string){ - op.op_name = val -} - -func (op *BaseRewriteOperation)SetTokens(val TokenStream) { - op.tokens = val -} - - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ - return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index, - text:text, - op_name:"InsertBeforeOp", - tokens:stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ - return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index+1, - text:text, - tokens:stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
-type ReplaceOp struct{ - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation:BaseRewriteOperation{ - index:from, - text:text, - op_name:"ReplaceOp", - tokens:stream, - }, - LastIndex:to, - } -} - -func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ - if op.text != ""{ - buffer.WriteString(op.text) - } - return op.LastIndex +1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) - } - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) -} - - -type TokenStreamRewriter struct { - //Our source stream - tokens TokenStream - // You may have multiple, named streams of rewrite operations. - // I'm calling these things "programs." - // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int -} - -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ - return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), - }, - last_rewrite_token_indexes: map[string]int{}, - } -} - -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ - return tsr.tokens -} - -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ - is, ok := tsr.programs[program_name] - if ok{ - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] - } -} - -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ - tsr.Rollback(Default_Program_Name, instruction_index) -} -//Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included -} - -func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ - tsr.DeleteProgram(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ - // to insert after, just insert before next index (even if past end) - var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ - tsr.InsertAfter(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) -} - -func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ - var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ - tsr.InsertBefore(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text 
string){ - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ - panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", - from, to, tsr.tokens.Size())) - } - var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){ - tsr.ReplaceDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){ - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){ - tsr.ReplaceToken(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){ - tsr.ReplaceTokenDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){ - tsr.Replace(program_name, from, to, "" ) -} - -func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){ - tsr.Delete(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){ - tsr.DeleteDefault(index,index) -} - -func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){ - tsr.DeleteToken(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int { - i, ok := tsr.last_rewrite_token_indexes[program_name] - if !ok{ - return -1 - } - return i -} - -func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{ - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){ - tsr.last_rewrite_token_indexes[program_name] = i -} - -func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{ - is := make([]RewriteOperation, 0, Program_Init_Size) - tsr.programs[name] = is - return is -} - -func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){ - is := tsr.GetProgram(name) - is = append(is, op) - tsr.programs[name] = is -} - -func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation { - is, ok := tsr.programs[name] - if !ok{ - is = tsr.InitializeProgram(name) - } - return is -} -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter)GetTextDefault() string{ - return tsr.GetText( - Default_Program_Name, - NewInterval(0, tsr.tokens.Size()-1)) -} -// Return the text from the original tokens altered per the -// instructions given to this rewriter. 
-func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] - start := interval.Start - stop := interval.Stop - // ensure start/end are in range - stop = min(stop, tsr.tokens.Size()-1) - start = max(start,0) - if rewrites == nil || len(rewrites) == 0{ - return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute - } - buf := bytes.Buffer{} - // First, optimize instruction stream - indexToOp := reduceToSingleOperationPerIndex(rewrites) - // Walk buffer, executing instructions and emitting tokens - for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())} - } - } - return buf.String() -} - -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: -// -// I.i.u I.j.v leave alone, nonoverlapping -// I.i.u I.i.v combine: Iivu -// -// R.i-j.u R.x-y.v | i-j in x-y delete first R -// R.i-j.u R.i-j.v delete first R -// R.i-j.u R.x-y.v | x-y in i-j ERROR -// R.i-j.u R.x-y.v | boundaries overlap ERROR -// -// Delete special case of replace (text==null): -// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) -// -// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping -// R.x-y.v I.i.u | i in x-y ERROR -// R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping -// -// I.i.u = insert u before op @ index i -// R.x-y.u = replace x-y indexed tokens with u -// -// First we need to examine replaces. For any replace op: -// -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. -// -// Then we can deal with inserts: -// -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace -// -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. -// -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. -// -// Return a map from token index to operation. -// -func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{ - // WALK REPLACES - for i:=0; i < len(rewrites); i++{ - op := rewrites[i] - if op == nil{continue} - rop, ok := op.(*ReplaceOp) - if !ok{continue} - // Wipe prior inserts within range - for j:=0; j rop.index && iop.index <=rop.LastIndex{ - // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil - } - } - } - // Drop any prior replaces contained within - for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{ - // delete replace as it's a no-op. 
- rewrites[prevop.instruction_index] = nil - continue - } - // throw exception unless disjoint or identical - disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex - // Delete special case of replace (text==null): - // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - if prevop.text == "" && rop.text == "" && !disjoint{ - rewrites[prevop.instruction_index] = nil - rop.index = min(prevop.index, rop.index) - rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version - }else if !disjoint{ - panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) - } - } - } - } - // WALK INSERTS - for i:=0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil{continue} - //hack to replicate inheritance in composition - _, iok := rewrites[i].(*InsertBeforeOp) - _, aok := rewrites[i].(*InsertAfterOp) - if !iok && !aok{continue} - iop := rewrites[i] - // combine current insert with prior if any at same index - // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic - for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{ - panic("insert op "+iop.String()+" within boundaries of previous "+rop.String()) - } - } - } - } - m := map[int]RewriteOperation{} - for i:=0; i < len(rewrites); i++{ - op := rewrites[i] - if op == nil {continue} - if _, ok := m[op.GetIndex()]; ok{ - panic("should only be one op per index") - } - m[op.GetIndex()] = op - } - return m -} - - -/* - Quick fixing Go lack of overloads - */ - -func max(a,b int)int{ - if a>b{ - return a - }else { - return b - } -} -func min(a,b int)int{ - if aThis is a one way link. It emanates from a state (usually via a list of -// transitions) and has a target state.

-// -//

Since we never have to change the ATN transitions once we construct it, -// we can fix these transitions as specific classes. The DFA transitions -// on the other hand need to update the labels as it adds transitions to -// the states. We'll use the term Edge for the DFA to distinguish them from -// ATN transitions.
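A simplified sketch of the "fixed transition classes" idea (illustrative only, not the vendored types): each kind of edge hard-codes its own matching rule, which is exactly what the AtomTransition and SetTransition implementations below do.

```go
package example

// Illustrative only: every transition kind fixes its matching rule at
// construction time, so the ATN never has to mutate an edge after the fact.
type edge interface {
	matches(symbol int) bool
}

type atomEdge struct{ label int }          // matches exactly one symbol
type setEdge struct{ labels map[int]bool } // matches any symbol in a fixed set

func (e atomEdge) matches(symbol int) bool { return symbol == e.label }
func (e setEdge) matches(symbol int) bool  { return e.labels[symbol] }
```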

- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - return "'" + string(t.start) + "'..'" + string(t.stop) + "'" -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string 
{ - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." 
-} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go deleted file mode 100644 index bdeb6d7881..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var _ ParseTreeListener = &BaseParseTreeListener{} - -func 
(l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} - -type TerminalNodeImpl struct { - parentCtx RuleContext - - symbol Token -} - -var _ TerminalNode = &TerminalNodeImpl{} - -func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { - tn := new(TerminalNodeImpl) - - tn.parentCtx = nil - tn.symbol = symbol - - return tn -} - -func (t *TerminalNodeImpl) GetChild(i int) Tree { - return nil -} - -func (t *TerminalNodeImpl) GetChildren() []Tree { - return nil -} - -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { - panic("Cannot set children on terminal node") -} - -func (t *TerminalNodeImpl) GetSymbol() Token { - return t.symbol -} - -func (t *TerminalNodeImpl) GetParent() Tree { - return t.parentCtx -} - -func (t *TerminalNodeImpl) SetParent(tree Tree) { - t.parentCtx = tree.(RuleContext) -} - -func (t *TerminalNodeImpl) GetPayload() interface{} { - return t.symbol -} - -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { - if t.symbol == nil { - return TreeInvalidInterval - } - tokenIndex := t.symbol.GetTokenIndex() - return NewInterval(tokenIndex, tokenIndex) -} - -func (t *TerminalNodeImpl) GetChildCount() int { - return 0 -} - -func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitTerminal(t) -} - -func (t *TerminalNodeImpl) GetText() string { - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) String() string { - if t.symbol.GetTokenType() == TokenEOF { - return "" - } - - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { - return t.String() -} - -// Represents a token that was consumed during reSynchronization -// rather than during a valid Match operation. For example, -// we will create this kind of a node during single token insertion -// and deletion as well as during "consume until error recovery set" -// upon no viable alternative exceptions. - -type ErrorNodeImpl struct { - *TerminalNodeImpl -} - -var _ ErrorNode = &ErrorNodeImpl{} - -func NewErrorNodeImpl(token Token) *ErrorNodeImpl { - en := new(ErrorNodeImpl) - en.TerminalNodeImpl = NewTerminalNodeImpl(token) - return en -} - -func (e *ErrorNodeImpl) errorNode() {} - -func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitErrorNode(e) -} - -type ParseTreeWalker struct { -} - -func NewParseTreeWalker() *ParseTreeWalker { - return new(ParseTreeWalker) -} - -// Performs a walk on the given parse tree starting at the root and going down recursively -// with depth-first search. On each node, EnterRule is called before -// recursively walking down into child nodes, then -// ExitRule is called after the recursive call to wind up. 
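The walk order described above (EnterRule before descending into children, ExitRule afterwards) is what listener implementations rely on. A hedged usage sketch built on the BaseParseTreeListener and ParseTreeWalkerDefault removed in this file (ruleCounter is an illustrative name, not part of the runtime):

```go
package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// ruleCounter embeds the no-op base listener and overrides only the hook it
// cares about; the walker calls EnterEveryRule before visiting children.
type ruleCounter struct {
	*antlr.BaseParseTreeListener
	count int
}

func (r *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) { r.count++ }

func countRules(tree antlr.ParseTree) int {
	listener := &ruleCounter{BaseParseTreeListener: &antlr.BaseParseTreeListener{}}
	antlr.ParseTreeWalkerDefault.Walk(listener, tree)
	return listener.count
}
```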
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { - switch tt := t.(type) { - case ErrorNode: - listener.VisitErrorNode(tt) - case TerminalNode: - listener.VisitTerminal(tt) - default: - p.EnterRule(listener, t.(RuleNode)) - for i := 0; i < t.GetChildCount(); i++ { - child := t.GetChild(i) - p.Walk(listener, child) - } - p.ExitRule(listener, t.(RuleNode)) - } -} - -// -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} -// then by triggering the event specific to the given parse tree node -// -func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) -} - -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} -// -func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) -} - -var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go deleted file mode 100644 index 80144ecade..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -/** A set of utility routines useful for all kinds of ANTLR trees. */ - -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. -func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { - - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - s := TreesGetNodeText(tree, ruleNames, nil) - - s = EscapeWhitespace(s, false) - c := tree.GetChildCount() - if c == 0 { - return s - } - res := "(" + s + " " - if c > 0 { - s = TreesStringTree(tree.GetChild(0), ruleNames, nil) - res += s - } - for i := 1; i < c; i++ { - s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) - } - res += ")" - return res -} - -func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - if ruleNames != nil { - switch t2 := t.(type) { - case RuleNode: - t3 := t2.GetRuleContext() - altNumber := t3.GetAltNumber() - - if altNumber != ATNInvalidAltNumber { - return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) - } - return ruleNames[t3.GetRuleIndex()] - case ErrorNode: - return fmt.Sprint(t2) - case TerminalNode: - if t2.GetSymbol() != nil { - return t2.GetSymbol().GetText() - } - } - } - - // no recog for rule names - payload := t.GetPayload() - if p2, ok := payload.(Token); ok { - return p2.GetText() - } - - return fmt.Sprint(t.GetPayload()) -} - -// Return ordered list of all children of this node -func TreesGetChildren(t Tree) []Tree { - list := make([]Tree, 0) - for i := 0; i < t.GetChildCount(); i++ { - list = append(list, t.GetChild(i)) - } - return list -} - -// Return a list of all ancestors of this node. The first node of -// list is the root and the last is the parent of this node. 
-// -func TreesgetAncestors(t Tree) []Tree { - ancestors := make([]Tree, 0) - t = t.GetParent() - for t != nil { - f := []Tree{t} - ancestors = append(f, ancestors...) - t = t.GetParent() - } - return ancestors -} - -func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { - return TreesfindAllNodes(t, ttype, true) -} - -func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { - return TreesfindAllNodes(t, ruleIndex, false) -} - -func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { - nodes := make([]ParseTree, 0) - treesFindAllNodes(t, index, findTokens, &nodes) - return nodes -} - -func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { - // check this node (the root) first - - t2, ok := t.(TerminalNode) - t3, ok2 := t.(ParserRuleContext) - - if findTokens && ok { - if t2.GetSymbol().GetTokenType() == index { - *nodes = append(*nodes, t2) - } - } else if !findTokens && ok2 { - if t3.GetRuleIndex() == index { - *nodes = append(*nodes, t3) - } - } - // check children - for i := 0; i < t.GetChildCount(); i++ { - treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) - } -} - -func TreesDescendants(t ParseTree) []ParseTree { - nodes := []ParseTree{t} - for i := 0; i < t.GetChildCount(); i++ { - nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) - } - return nodes -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go deleted file mode 100644 index bba2ffae79..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
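For the trees.go helpers deleted above, a brief hedged sketch of typical usage (ruleNames and tokenType are assumed inputs supplied by a generated parser):

```go
package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// dumpTree prints the LISP-style rendering of a parse tree and then lists the
// text of every token node of one type, using the removed helpers.
func dumpTree(tree antlr.ParseTree, ruleNames []string, tokenType int) {
	fmt.Println(antlr.TreesStringTree(tree, ruleNames, nil))
	for _, node := range antlr.TreesFindAllTokenNodes(tree, tokenType) {
		fmt.Println("token:", node.GetText())
	}
}
```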
- -package antlr - -import ( - "bytes" - "errors" - "fmt" - "sort" - "strconv" - "strings" -) - -func intMin(a, b int) int { - if a < b { - return a - } - return b -} - -func intMax(a, b int) int { - if a > b { - return a - } - return b -} - -// A simple integer stack - -type IntStack []int - -var ErrEmptyStack = errors.New("Stack is empty") - -func (s *IntStack) Pop() (int, error) { - l := len(*s) - 1 - if l < 0 { - return 0, ErrEmptyStack - } - v := (*s)[l] - *s = (*s)[0:l] - return v, nil -} - -func (s *IntStack) Push(e int) { - *s = append(*s, e) -} - -type Set struct { - data map[int][]interface{} - hashcodeFunction func(interface{}) int - equalsFunction func(interface{}, interface{}) bool -} - -func NewSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(interface{}, interface{}) bool) *Set { - - s := new(Set) - - s.data = make(map[int][]interface{}) - - if hashcodeFunction != nil { - s.hashcodeFunction = hashcodeFunction - } else { - s.hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - s.equalsFunction = standardEqualsFunction - } else { - s.equalsFunction = equalsFunction - } - - return s -} - -func standardEqualsFunction(a interface{}, b interface{}) bool { - - ac, oka := a.(comparable) - bc, okb := b.(comparable) - - if !oka || !okb { - panic("Not Comparable") - } - - return ac.equals(bc) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.hash() - } - - panic("Not Hasher") -} - -type hasher interface { - hash() int -} - -func (s *Set) length() int { - return len(s.data) -} - -func (s *Set) add(value interface{}) interface{} { - - key := s.hashcodeFunction(value) - - values := s.data[key] - - if s.data[key] != nil { - for i := 0; i < len(values); i++ { - if s.equalsFunction(value, values[i]) { - return values[i] - } - } - - s.data[key] = append(s.data[key], value) - return value - } - - v := make([]interface{}, 1, 10) - v[0] = value - s.data[key] = v - - return value -} - -func (s *Set) contains(value interface{}) bool { - - key := s.hashcodeFunction(value) - - values := s.data[key] - - if s.data[key] != nil { - for i := 0; i < len(values); i++ { - if s.equalsFunction(value, values[i]) { - return true - } - } - } - return false -} - -func (s *Set) values() []interface{} { - var l []interface{} - - for _, v := range s.data { - l = append(l, v...) 
- } - - return l -} - -func (s *Set) String() string { - r := "" - - for _, av := range s.data { - for _, v := range av { - r += fmt.Sprint(v) - } - } - - return r -} - -type BitSet struct { - data map[int]bool -} - -func NewBitSet() *BitSet { - b := new(BitSet) - b.data = make(map[int]bool) - return b -} - -func (b *BitSet) add(value int) { - b.data[value] = true -} - -func (b *BitSet) clear(index int) { - delete(b.data, index) -} - -func (b *BitSet) or(set *BitSet) { - for k := range set.data { - b.add(k) - } -} - -func (b *BitSet) remove(value int) { - delete(b.data, value) -} - -func (b *BitSet) contains(value int) bool { - return b.data[value] -} - -func (b *BitSet) values() []int { - ks := make([]int, len(b.data)) - i := 0 - for k := range b.data { - ks[i] = k - i++ - } - sort.Ints(ks) - return ks -} - -func (b *BitSet) minValue() int { - min := 2147483647 - - for k := range b.data { - if k < min { - min = k - } - } - - return min -} - -func (b *BitSet) equals(other interface{}) bool { - otherBitSet, ok := other.(*BitSet) - if !ok { - return false - } - - if len(b.data) != len(otherBitSet.data) { - return false - } - - for k, v := range b.data { - if otherBitSet.data[k] != v { - return false - } - } - - return true -} - -func (b *BitSet) length() int { - return len(b.data) -} - -func (b *BitSet) String() string { - vals := b.values() - valsS := make([]string, len(vals)) - - for i, val := range vals { - valsS[i] = strconv.Itoa(val) - } - return "{" + strings.Join(valsS, ", ") + "}" -} - -type AltDict struct { - data map[string]interface{} -} - -func NewAltDict() *AltDict { - d := new(AltDict) - d.data = make(map[string]interface{}) - return d -} - -func (a *AltDict) Get(key string) interface{} { - key = "k-" + key - return a.data[key] -} - -func (a *AltDict) put(key string, value interface{}) { - key = "k-" + key - a.data[key] = value -} - -func (a *AltDict) values() []interface{} { - vs := make([]interface{}, len(a.data)) - i := 0 - for _, v := range a.data { - vs[i] = v - i++ - } - return vs -} - -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - -func EscapeWhitespace(s string, escapeSpaces bool) string { - - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - if escapeSpaces { - s = strings.Replace(s, " ", "\u00B7", -1) - } - return s -} - -func TerminalNodeToStringArray(sa []TerminalNode) []string { - st := make([]string, len(sa)) - - for i, s := range sa { - st[i] = fmt.Sprintf("%v", s) - } - - return st -} - -func PrintArrayJavaStyle(sa []string) string { - var buffer bytes.Buffer - - buffer.WriteString("[") - - for i, s := range sa { - buffer.WriteString(s) - if i != len(sa)-1 { - buffer.WriteString(", ") - } - } - - buffer.WriteString("]") - - return buffer.String() -} - -// The following routines were lifted from bits.rotate* available in Go 1.9. - -const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64 - -// rotateLeft returns the value of x rotated left by (k mod UintSize) bits. -// To rotate x right by k bits, call RotateLeft(x, -k). 
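The rotate helpers below predate math/bits; since Go 1.9 the standard library provides the same semantics, which makes the behavior easy to sanity-check. A hedged, standalone example:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0x80000001)
	fmt.Printf("%#x\n", bits.RotateLeft32(x, 1))  // prints 0x3: the high bit wraps to bit 0
	fmt.Printf("%#x\n", bits.RotateLeft32(x, -1)) // prints 0xc0000000: negative k rotates right
}
```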
-func rotateLeft(x uint, k int) uint { - if uintSize == 32 { - return uint(rotateLeft32(uint32(x), k)) - } - return uint(rotateLeft64(uint64(x), k)) -} - -// rotateLeft32 returns the value of x rotated left by (k mod 32) bits. -func rotateLeft32(x uint32, k int) uint32 { - const n = 32 - s := uint(k) & (n - 1) - return x<>(n-s) -} - -// rotateLeft64 returns the value of x rotated left by (k mod 64) bits. -func rotateLeft64(x uint64, k int) uint64 { - const n = 64 - s := uint(k) & (n - 1) - return x<>(n-s) -} - - -// murmur hash -const ( - c1_32 uint = 0xCC9E2D51 - c2_32 uint = 0x1B873593 - n1_32 uint = 0xE6546B64 -) - -func murmurInit(seed int) int { - return seed -} - -func murmurUpdate(h1 int, k1 int) int { - var k1u uint - k1u = uint(k1) * c1_32 - k1u = rotateLeft(k1u, 15) - k1u *= c2_32 - - var h1u = uint(h1) ^ k1u - k1u = rotateLeft(k1u, 13) - h1u = h1u*5 + 0xe6546b64 - return int(h1u) -} - -func murmurFinish(h1 int, numberOfWords int) int { - var h1u uint = uint(h1) - h1u ^= uint(numberOfWords * 4) - h1u ^= h1u >> 16 - h1u *= uint(0x85ebca6b) - h1u ^= h1u >> 13 - h1u *= 0xc2b2ae35 - h1u ^= h1u >> 16 - - return int(h1u) -} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129ecc4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 99849c0e19..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. 
-// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. 
-// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. -type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. -func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 9cf7eaf400..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,221 +0,0 @@ -package awserr - -import ( - "encoding/hex" - "fmt" -) - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. 
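For reviewers skimming this vendor drop, a minimal sketch (not part of the diff) of how the awserr constructors and interfaces removed above are normally consumed. The error code, message, status code, and request ID below are invented for illustration; only awserr.New, awserr.NewRequestFailure, and the Error/RequestFailure accessors come from the package.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
    // Wrap a lower-level failure with an SDK-style code and message.
    base := awserr.New("ResourceNotFound", "bucket does not exist",
        fmt.Errorf("underlying transport error"))

    // Response handlers attach HTTP metadata the same way.
    reqErr := awserr.NewRequestFailure(base, 404, "example-request-id")

    // Callers receive a plain error and unwrap it with type assertions.
    var err error = reqErr
    if aerr, ok := err.(awserr.Error); ok {
        fmt.Println(aerr.Code(), aerr.Message(), aerr.OrigErr())
    }
    if rf, ok := err.(awserr.RequestFailure); ok {
        fmt.Println(rf.StatusCode(), rf.RequestID())
    }
}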
-// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string - bytes []byte -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. 
-func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += e[i].Error() - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106d5c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 142a7a01c5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type they are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index a4eb6a7f43..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,221 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
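A rough, self-contained sketch of the reflection helpers deleted in awsutil/copy.go and equal.go above. The Example struct and its values are invented; only Copy, CopyOf, and DeepEqual come from the package.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awsutil"
)

// Example is an invented struct; the helpers walk exported fields only.
type Example struct {
    Name  *string
    Count int
}

func main() {
    name := "original"
    src := &Example{Name: &name, Count: 3}

    // CopyOf allocates a new value and deep-copies assignable exported fields.
    dst := awsutil.CopyOf(src).(*Example)

    // Copy does the same into an existing destination.
    var into Example
    awsutil.Copy(&into, src)

    // DeepEqual dereferences the top-level pointers before comparing.
    fmt.Println(awsutil.DeepEqual(src, dst)) // true

    *dst.Name = "changed" // the copy owns its own *string
    fmt.Println(awsutil.DeepEqual(src, dst)) // false
}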
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - rvals := rValuesAtPath(i, path, true, false, v == nil) - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 710eb432f8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,113 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index 645df2450f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,88 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
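To illustrate the two dump helpers being removed (Prettify above, StringValue defined next), a small sketch with an invented struct; the `sensitive:"true"` tag is the one stringValue inspects.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awsutil"
)

// Login is invented for this sketch.
type Login struct {
    User     *string
    Password *string `sensitive:"true"`
}

func main() {
    user, pass := "alice", "hunter2"
    in := &Login{User: &user, Password: &pass}

    // Indented dump of exported, non-nil fields.
    fmt.Println(awsutil.Prettify(in))

    // Same walk, but the value of the sensitive-tagged field is not printed.
    fmt.Println(awsutil.StringValue(in))
}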
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index 03334d6920..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string - - // States that the signing name did not come from a modeled source but - // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overridden based on metadata the - // service has. - SigningNameDerived bool -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers.Copy(), - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = DefaultRetryerMaxNumRetries - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) - c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 9f6af19dd4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,177 +0,0 @@ -package client - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, you can implement the -// request.Retryer interface. -// -type DefaultRetryer struct { - // Num max Retries is the number of max retries that will be performed. - // By default, this is zero. - NumMaxRetries int - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - // If not set, the value is 0ns. - MinRetryDelay time.Duration - - // MinThrottleRetryDelay is the minimum retry delay when throttled. - // If not set, the value is 0ns. - MinThrottleDelay time.Duration - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - // If not set, the value is 0ns. - MaxRetryDelay time.Duration - - // MaxThrottleDelay is the maximum retry delay when throttled. - // If not set, the value is 0ns. - MaxThrottleDelay time.Duration -} - -const ( - // DefaultRetryerMaxNumRetries sets maximum number of retries - DefaultRetryerMaxNumRetries = 3 - - // DefaultRetryerMinRetryDelay sets minimum retry delay - DefaultRetryerMinRetryDelay = 30 * time.Millisecond - - // DefaultRetryerMinThrottleDelay sets minimum delay when throttled - DefaultRetryerMinThrottleDelay = 500 * time.Millisecond - - // DefaultRetryerMaxRetryDelay sets maximum retry delay - DefaultRetryerMaxRetryDelay = 300 * time.Second - - // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled - DefaultRetryerMaxThrottleDelay = 300 * time.Second -) - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. 
-func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -// setRetryerDefaults sets the default values of the retryer if not set -func (d *DefaultRetryer) setRetryerDefaults() { - if d.MinRetryDelay == 0 { - d.MinRetryDelay = DefaultRetryerMinRetryDelay - } - if d.MaxRetryDelay == 0 { - d.MaxRetryDelay = DefaultRetryerMaxRetryDelay - } - if d.MinThrottleDelay == 0 { - d.MinThrottleDelay = DefaultRetryerMinThrottleDelay - } - if d.MaxThrottleDelay == 0 { - d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay - } -} - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - - // if number of max retries is zero, no retries will be performed. - if d.NumMaxRetries == 0 { - return 0 - } - - // Sets default value for retryer members - d.setRetryerDefaults() - - // minDelay is the minimum retryer delay - minDelay := d.MinRetryDelay - - var initialDelay time.Duration - - isThrottle := r.IsErrorThrottle() - if isThrottle { - if delay, ok := getRetryAfterDelay(r); ok { - initialDelay = delay - } - minDelay = d.MinThrottleDelay - } - - retryCount := r.RetryCount - - // maxDelay the maximum retryer delay - maxDelay := d.MaxRetryDelay - - if isThrottle { - maxDelay = d.MaxThrottleDelay - } - - var delay time.Duration - - // Logic to cap the retry count based on the minDelay provided - actualRetryCount := int(math.Log2(float64(minDelay))) + 1 - if actualRetryCount < 63-retryCount { - delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay) - if delay > maxDelay { - delay = getJitterDelay(maxDelay / 2) - } - } else { - delay = getJitterDelay(maxDelay / 2) - } - return delay + initialDelay -} - -// getJitterDelay returns a jittered delay for retry -func getJitterDelay(duration time.Duration) time.Duration { - return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - - // ShouldRetry returns false if number of max retries is 0. - if d.NumMaxRetries == 0 { - return false - } - - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - return r.IsErrorRetryable() || r.IsErrorThrottle() -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code.
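The retryer removed here is normally swapped in through configuration rather than called directly; a minimal sketch using the request.WithRetryer helper that the Config documentation later in this diff refers to. The delay values are arbitrary.

package main

import (
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/request"
)

func main() {
    // Clients built from this config use the tuned RetryRules/ShouldRetry
    // above instead of the package defaults; the durations are arbitrary.
    cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
        NumMaxRetries:    5,
        MinRetryDelay:    50 * time.Millisecond,
        MinThrottleDelay: 500 * time.Millisecond,
        MaxRetryDelay:    30 * time.Second,
        MaxThrottleDelay: 60 * time.Second,
    })
    _ = cfg
}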
-func canUseRetryAfterHeader(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index 8958c32d4e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,194 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent -// to a service. Will include the HTTP request body if the LogLevel of the -// request matches LogDebugWithHTTPBody. -var LogHTTPRequestHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequest", - Fn: logRequest, -} - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - bodySeekable := aws.IsReaderSeekable(r.Body) - - b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - if !bodySeekable { - r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) - } - // Reset the request body because dumpRequest will re-wrap the - // r.HTTPRequest's Body as a NoOpCloser and will not be reset after - // read by the HTTP client reader. - if err := r.Error; err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent -// to a service. Will only log the HTTP request's headers. The request payload -// will not be read. 
-var LogHTTPRequestHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequestHeader", - Fn: logRequestHeader, -} - -func logRequestHeader(r *request.Request) { - b, err := httputil.DumpRequestOut(r.HTTPRequest, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -// LogHTTPResponseHandler is a SDK request handler to log the HTTP response -// received from a service. Will include the HTTP response body if the LogLevel -// of the request matches LogDebugWithHTTPBody. -var LogHTTPResponseHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponse", - Fn: logResponse, -} - -func logResponse(r *request.Request) { - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - - if r.HTTPResponse == nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - if logBody { - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - } - - handlerFn := func(req *request.Request) { - b, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(fmt.Sprintf(logRespMsg, - req.ClientInfo.ServiceName, req.Operation.Name, string(b))) - - if logBody { - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} - -// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP -// response received from a service. Will only log the HTTP response's headers. -// The response payload will not be read. 
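These logging handlers are enabled indirectly through the client's log level rather than installed by hand. A hedged sketch of turning them on; the session package and the aws.LogLevel helper come from elsewhere in the SDK, not from this hunk.

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // LogDebugWithHTTPBody makes LogHTTPRequestHandler and
    // LogHTTPResponseHandler dump headers and bodies via the Logger.
    sess := session.Must(session.NewSession(&aws.Config{
        LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
    }))
    _ = sess // clients built from sess inherit the debug handlers
}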
-var LogHTTPResponseHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponseHeader", - Fn: logResponseHeader, -} - -func logResponseHeader(r *request.Request) { - if r.Config.Logger == nil { - return - } - - b, err := httputil.DumpResponse(r.HTTPResponse, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index 0c48f72e08..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,14 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go deleted file mode 100644 index 881d575f01..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// NoOpRetryer provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type NoOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d NoOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index 2c002e152a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,587 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// UseServiceDefaultRetries instructs the config to use the service's own -// default number of retries. This will be the default action if -// Config.MaxRetries is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer -// interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig structure. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) -// -// // Create S3 service client with a specific Region. 
-// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to - // retrieve credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `nil` to use the default generated endpoint. - // - // Note: You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS - // Regions and Endpoints. - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. 
- DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // Note: This configuration option is specific to the Amazon S3 service. - // - // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // for Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait - // timeout. https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disble 100-Continue if you experience issues - // with proxies or third party S3 compatible services. - S3Disable100Continue *bool - - // Set this to `true` to enable S3 Accelerate feature. For all operations - // compatible with S3 Accelerate will use the accelerate endpoint for - // requests. Requests not compatible will fall back to normal S3 requests. - // - // The bucket must be enable for accelerate to be used with S3 client with - // accelerate enabled. If the bucket is not enabled for accelerate an error - // will be returned. The bucket name must be DNS compatible to also work - // with accelerate. - S3UseAccelerate *bool - - // S3DisableContentMD5Validation config option is temporarily disabled, - // For S3 GetObject API calls, #1837. - // - // Set this to `true` to disable the S3 service client from automatically - // adding the ContentMD5 to S3 Object Put and Upload API calls. This option - // will also disable the SDK from performing object ContentMD5 validation - // on GetObject API calls. - S3DisableContentMD5Validation *bool - - // Set this to `true` to have the S3 service client to use the region specified - // in the ARN, when an ARN is provided as an argument to a bucket parameter. - S3UseARNRegion *bool - - // Set this to `true` to enable the SDK to unmarshal API response header maps to - // normalized lower case map keys. - // - // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case - // Metadata member's map keys. The value of the header in the map is unaffected. - LowerCaseHeaderMaps *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the - // EC2Metadata client to create a new http.Client. This options is only - // meaningful if you're not already using a custom HTTP client with the - // SDK. Enabled by default. - // - // Must be set and provided to the session.NewSession() in order to disable - // the EC2Metadata overriding the timeout for default credentials chain. 
- // - // Example: - // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) - // - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // Instructs the endpoint to be generated for a service client to - // be the dual stack endpoint. The dual stack endpoint will support - // both IPv4 and IPv6 addressing. - // - // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session - // as it will apply to all service clients created with the session. Even - // services which don't support dual stack endpoints. - // - // If the Endpoint config value is also provided the UseDualStack flag - // will be ignored. - // - // Only supported with. - // - // sess := session.Must(session.NewSession()) - // - // svc := s3.New(sess, &aws.Config{ - // UseDualStack: aws.Bool(true), - // }) - UseDualStack *bool - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. - // - // SleepDelay will prevent any Context from being used for canceling retry - // delay of an API operation. It is recommended to not use SleepDelay at all - // and specify a Retryer instead. - SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool - - // EnableEndpointDiscovery will allow for endpoint discovery on operations that - // have the definition in its model. By default, endpoint discovery is off. - // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // EnableEndpointDiscovery: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("/foo/bar/moo"), - // }) - EnableEndpointDiscovery *bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern. - DisableEndpointHostPrefix *bool - - // STSRegionalEndpoint will enable regional or legacy endpoint resolving - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). 
-// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c - -} - -// WithS3DisableContentMD5Validation sets a config -// S3DisableContentMD5Validation value returning a Config pointer for chaining. 
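// Illustrative sketch (not part of the vendored file above) of how the
// chained With* setters on aws.Config are typically combined. The region,
// retry count, and log level chosen here are arbitrary example values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(3).
		WithLogLevel(aws.LogDebugWithHTTPBody)

	fmt.Println(aws.StringValue(cfg.Region), aws.IntValue(cfg.MaxRetries))
}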
-func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { - c.S3DisableContentMD5Validation = &enable - return c - -} - -// WithS3UseARNRegion sets a config S3UseARNRegion value and -// returning a Config pointer for chaining -func (c *Config) WithS3UseARNRegion(enable bool) *Config { - c.S3UseARNRegion = &enable - return c -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// WithEndpointDiscovery will set whether or not to use endpoint discovery. -func (c *Config) WithEndpointDiscovery(t bool) *Config { - c.EnableEndpointDiscovery = &t - return c -} - -// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix -// when making requests. -func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { - c.DisableEndpointHostPrefix = &t - return c -} - -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { - c.STSRegionalEndpoint = sre - return c -} - -// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { - c.S3UsEast1RegionalEndpoint = sre - return c -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if 
other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.S3DisableContentMD5Validation != nil { - dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation - } - - if other.S3UseARNRegion != nil { - dst.S3UseARNRegion = other.S3UseARNRegion - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck - } - - if other.EnableEndpointDiscovery != nil { - dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery - } - - if other.DisableEndpointHostPrefix != nil { - dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix - } - - if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { - dst.STSRegionalEndpoint = other.STSRegionalEndpoint - } - - if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { - dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. -func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go deleted file mode 100644 index 2866f9a7fb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !go1.9 - -package aws - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. 
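// Illustrative sketch of the Copy/MergeIn semantics defined above: fields
// that are non-nil (or non-zero for the endpoint enums) in a later config
// override the earlier value, while nil fields are left untouched. The
// concrete values below are arbitrary.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().WithRegion("us-east-1").WithMaxRetries(5)
	override := &aws.Config{Region: aws.String("eu-west-1")} // MaxRetries stays nil

	merged := base.Copy(override)
	fmt.Println(aws.StringValue(merged.Region))  // eu-west-1 (overridden)
	fmt.Println(aws.IntValue(merged.MaxRetries)) // 5 (kept from base)
}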
- Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go deleted file mode 100644 index 3718b26e10..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 - -package aws - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go deleted file mode 100644 index 2f9446333a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.7 - -package aws - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go deleted file mode 100644 index 9c29f29af1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go deleted file mode 100644 index 304fd15612..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "time" -) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. 
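// Illustrative sketch of aws.SleepWithContext as documented above: the
// sleep ends early and returns the context's error once the context is
// canceled. The 50ms timeout is an arbitrary example value.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
		fmt.Println("sleep aborted:", err) // context deadline exceeded
	}
}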
-func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 4e076c1837..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,918 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. 
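// Illustrative sketch of the pointer/value helpers being removed above;
// the same Value/Slice/Map pattern repeats for every scalar type in
// convert_types.go, so one example stands in for all of them.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	name := aws.String("example-bucket") // *string from a literal
	fmt.Println(aws.StringValue(name))   // "example-bucket"
	fmt.Println(aws.StringValue(nil))    // "" when the pointer is nil

	flags := aws.BoolSlice([]bool{true, false})
	fmt.Println(aws.BoolValueSlice(flags)) // [true false]
}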
-func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. -func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int8 returns a pointer to the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Value returns the value of the int8 pointer passed in or -// 0 if the pointer is nil. 
-func Int8Value(v *int8) int8 { - if v != nil { - return *v - } - return 0 -} - -// Int8Slice converts a slice of int8 values into a slice of -// int8 pointers -func Int8Slice(src []int8) []*int8 { - dst := make([]*int8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int8ValueSlice converts a slice of int8 pointers into a slice of -// int8 values -func Int8ValueSlice(src []*int8) []int8 { - dst := make([]int8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int8Map converts a string map of int8 values into a string -// map of int8 pointers -func Int8Map(src map[string]int8) map[string]*int8 { - dst := make(map[string]*int8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int8ValueMap converts a string map of int8 pointers into a string -// map of int8 values -func Int8ValueMap(src map[string]*int8) map[string]int8 { - dst := make(map[string]int8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int16 returns a pointer to the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Value returns the value of the int16 pointer passed in or -// 0 if the pointer is nil. -func Int16Value(v *int16) int16 { - if v != nil { - return *v - } - return 0 -} - -// Int16Slice converts a slice of int16 values into a slice of -// int16 pointers -func Int16Slice(src []int16) []*int16 { - dst := make([]*int16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int16ValueSlice converts a slice of int16 pointers into a slice of -// int16 values -func Int16ValueSlice(src []*int16) []int16 { - dst := make([]int16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int16Map converts a string map of int16 values into a string -// map of int16 pointers -func Int16Map(src map[string]int16) map[string]*int16 { - dst := make(map[string]*int16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int16ValueMap converts a string map of int16 pointers into a string -// map of int16 values -func Int16ValueMap(src map[string]*int16) map[string]int16 { - dst := make(map[string]int16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. 
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint8 returns a pointer to the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Value returns the value of the uint8 pointer passed in or -// 0 if the pointer is nil. 
-func Uint8Value(v *uint8) uint8 { - if v != nil { - return *v - } - return 0 -} - -// Uint8Slice converts a slice of uint8 values into a slice of -// uint8 pointers -func Uint8Slice(src []uint8) []*uint8 { - dst := make([]*uint8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint8ValueSlice converts a slice of uint8 pointers into a slice of -// uint8 values -func Uint8ValueSlice(src []*uint8) []uint8 { - dst := make([]uint8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint8Map converts a string map of uint8 values into a string -// map of uint8 pointers -func Uint8Map(src map[string]uint8) map[string]*uint8 { - dst := make(map[string]*uint8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint8ValueMap converts a string map of uint8 pointers into a string -// map of uint8 values -func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { - dst := make(map[string]uint8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. -func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. 
-func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. -func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. 
-func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". 
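// Illustrative sketch of the time helpers above. Note that TimeUnixMilli
// reports milliseconds since the Unix epoch, matching services such as
// CloudWatch Logs; the sample timestamp is arbitrary.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	now := aws.Time(time.Now())              // *time.Time
	fmt.Println(aws.TimeValue(now).IsZero()) // false
	fmt.Println(aws.TimeValue(nil).IsZero()) // true: nil pointer yields zero time

	t := time.Unix(1, 500*int64(time.Millisecond))
	fmt.Println(aws.TimeUnixMilli(t)) // 1500
}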
-// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. -func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index aa902d7083..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,230 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. 
-var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - if r.Body != nil { - var err error - length, err = aws.SeekerLen(r.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) - return - } - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. -var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 5 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(5 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. 
- if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all request errors, and let the default retrier determine - // if the error is retryable. - r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{ - Name: "core.AfterRetryHandler", - Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } - }} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. 
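// Illustrative sketch of the request.NamedHandler pattern used by the
// core handlers above: a named function pushed onto a HandlerList. The
// handler name and request header set here are hypothetical.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	h := request.NamedHandler{
		Name: "example.AddTraceHeader",
		Fn: func(r *request.Request) {
			r.HTTPRequest.Header.Set("X-Example-Trace", "on") // hypothetical header
		},
	}

	var list request.HandlerList
	list.PushBackNamed(h)
	fmt.Println(list.Len()) // 1
}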
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b1557c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go deleted file mode 100644 index ab69c7a6f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package corehandlers - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec-env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = request.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *request.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - request.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 3ad1e798df..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true. - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. 
-// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go deleted file mode 100644 index 5852b26487..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.7 - -package credentials - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. 
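// Illustrative sketch of chaining providers as described above: the
// environment is consulted first, then a static fallback. The static keys
// are placeholders, not real credentials.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{}, // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
		&credentials.StaticProvider{Value: credentials.Value{
			AccessKeyID:     "EXAMPLEKEYID",     // placeholder
			SecretAccessKey: "EXAMPLESECRETKEY", // placeholder
		}},
	})

	v, err := creds.Get()
	fmt.Println(v.ProviderName, err)
}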
-func backgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go deleted file mode 100644 index 388b215418..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build go1.7 - -package credentials - -import "context" - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func backgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go deleted file mode 100644 index 8152a864ad..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.9 - -package credentials - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go deleted file mode 100644 index 4356edb3d5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.9 - -package credentials - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. 
-// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index 9f8fd92a50..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,339 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "fmt" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/sync/singleflight" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. 
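// Illustrative sketch of credentials.Value and HasKeys as documented
// above; the key values are placeholders. The empty-key case corresponds
// to the AnonymousCredentials used for unsigned requests.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	v := credentials.Value{
		AccessKeyID:     "EXAMPLEKEYID",     // placeholder
		SecretAccessKey: "EXAMPLESECRETKEY", // placeholder
	}
	fmt.Println(v.HasKeys()) // true

	fmt.Println(credentials.Value{}.HasKeys()) // false: no keys set
}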
-func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// ProviderWithContext is a Provider that can retrieve credentials with a Context -type ProviderWithContext interface { - Provider - - RetrieveWithContext(Context) (Value, error) -} - -// An Expirer is an interface that Providers can implement to expose the expiration -// time, if known. If the Provider cannot accurately provide this info, -// it should not implement this interface. -type Expirer interface { - // The time at which the credentials are no longer valid - ExpiresAt() time.Time -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. -func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. -func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - curTime := e.CurrentTime - if curTime == nil { - curTime = time.Now - } - return e.expiration.Before(curTime()) -} - -// ExpiresAt returns the expiration time of the credential -func (e *Expiry) ExpiresAt() time.Time { - return e.expiration -} - -// A Credentials provides concurrency safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. 
Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds atomic.Value - sf singleflight.Group - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - c := &Credentials{ - provider: provider, - } - c.creds.Store(Value{}) - return c -} - -// GetWithContext returns the credentials value, or error if the credentials -// Value failed to be retrieved. Will return early if the passed in context is -// canceled. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -// -// Passed in Context is equivalent to aws.Context, and context.Context. -func (c *Credentials) GetWithContext(ctx Context) (Value, error) { - if curCreds := c.creds.Load(); !c.isExpired(curCreds) { - return curCreds.(Value), nil - } - - // Cannot pass context down to the actual retrieve, because the first - // context would cancel the whole group when there is not direct - // association of items in the group. - resCh := c.sf.DoChan("", func() (interface{}, error) { - return c.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Value), res.Err - case <-ctx.Done(): - return Value{}, awserr.New("RequestCanceled", - "request context canceled", ctx.Err()) - } -} - -func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { - if curCreds := c.creds.Load(); !c.isExpired(curCreds) { - return curCreds.(Value), nil - } - - if p, ok := c.provider.(ProviderWithContext); ok { - creds, err = p.RetrieveWithContext(ctx) - } else { - creds, err = c.provider.Retrieve() - } - if err == nil { - c.creds.Store(creds) - } - - return creds, err -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - return c.GetWithContext(backgroundContext()) -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.creds.Store(Value{}) -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. 
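// Editorial sketch, not part of the upstream file: how a caller typically
// drives the Credentials cache described above. Get serves from the cache,
// Expire forces the next Get back to the Provider, and GetWithContext lets a
// canceled context abort an in-flight retrieval. The key values are dummies.
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    creds := credentials.NewCredentials(&credentials.StaticProvider{Value: credentials.Value{
        AccessKeyID:     "AKID",
        SecretAccessKey: "SECRET",
    }})

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    // First call always invokes Provider.Retrieve.
    v, err := creds.GetWithContext(ctx)
    fmt.Println(v.ProviderName, err)

    // Force-expire the cached Value; the next Get refreshes it.
    creds.Expire()
    v, err = creds.Get()
    fmt.Println(v.ProviderName, err)
}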
-func (c *Credentials) IsExpired() bool { - return c.isExpired(c.creds.Load()) -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired(creds interface{}) bool { - return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() -} - -// ExpiresAt provides access to the functionality of the Expirer interface of -// the underlying Provider, if it supports that interface. Otherwise, it returns -// an error. -func (c *Credentials) ExpiresAt() (time.Time, error) { - expirer, ok := c.provider.(Expirer) - if !ok { - return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), - nil) - } - if c.creds.Load().(Value) == (Value{}) { - // set expiration time to the distant past - return time.Time{}, nil - } - return expirer.ExpiresAt(), nil -} - -type suppressedContext struct { - Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index 92af5b7250..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,188 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. 
-// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. -func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - return m.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - credsList, err := requestCredList(ctx, m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New(request.ErrCodeSerialization, - "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. 
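// Editorial sketch, not part of the upstream file: wiring the EC2 role
// provider into a session-backed client with a 10s ExpiryWindow, so
// credentials are refreshed shortly before the instance profile rotates them.
// It compiles anywhere but only resolves credentials where EC2 instance
// metadata is reachable.
package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // session.Session satisfies the client.ConfigProvider expected above.
    sess := session.Must(session.NewSession())

    creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
        p.ExpiryWindow = 10 * time.Second
    })

    v, err := creds.Get()
    fmt.Println(v.ProviderName, err)
}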
-func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New(request.ErrCodeSerialization, - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go deleted file mode 100644 index 785f30d8e6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the credentials.Provider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - staticCreds bool - credentials.Expiry - - // Requires a AWS Client to make HTTP requests to the endpoint with. - // the Endpoint the request will be made to is provided by the aws.Config's - // Endpoint value. - Client *client.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. 
- // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - AuthorizationToken string -} - -// NewProviderClient returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { - p := &Provider{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: "CredentialsEndpoint", - Endpoint: endpoint, - }, - handlers, - ), - } - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - for _, option := range options { - option(p) - } - - return p -} - -// NewCredentialsClient returns a pointer to a new Credentials object -// wrapping the endpoint credentials Provider. -func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { - return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *Provider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. 
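// Editorial sketch, not part of the upstream file: pointing the endpoint
// provider at a local HTTP service that returns the JSON shape documented in
// the package comment. The URL and token are placeholders, and the session's
// exported Config/Handlers fields are reused to build the internal client.
package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess := session.Must(session.NewSession())

    creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers,
        "http://127.0.0.1:8080/creds",
        func(p *endpointcreds.Provider) {
            p.ExpiryWindow = 30 * time.Second
            p.AuthorizationToken = "example-token" // sent as the Authorization header
        })

    v, err := creds.Get()
    fmt.Println(v.ProviderName, err)
}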
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } else { - p.staticCreds = true - } - - return credentials.Value{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - ProviderName: ProviderName, - }, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { - op := &request.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.SetContext(ctx) - req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { - req.HTTPRequest.Header.Set("Authorization", authToken) - } - - return out, req.Send() -} - -func validateEndpointHandler(r *request.Request) { - if len(r.ClientInfo.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 54c5cf7333..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,74 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. 
-// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d20..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go deleted file mode 100644 index e624836002..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ /dev/null @@ -1,426 +0,0 @@ -/* -Package processcreds is a credential Provider to retrieve `credential_process` -credentials. - -WARNING: The following describes a method of sourcing credentials from an external -process. This can potentially be dangerous, so proceed with caution. Other -credential providers should be preferred if at all possible. If using this -option, you should make sure that the config file is as locked down as possible -using security best practices for your operating system. - -You can use credentials from a `credential_process` in a variety of ways. - -One way is to setup your shared config file, located in the default -location, with the `credential_process` key and the command you want to be -called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. - - [default] - credential_process = /command/to/call - -Creating a new session will use the credential process to retrieve credentials. -NOTE: If there are credentials in the profile you are using, the credential -process will not be used. - - // Initialize a session to load credentials. - sess, _ := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1")}, - ) - - // Create S3 service client to use the credentials. 
- svc := s3.New(sess) - -Another way to use the `credential_process` method is by using -`credentials.NewCredentials()` and providing a command to be executed to -retrieve credentials: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentials("/path/to/command") - - // Create service client value configured for credentials. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -You can set a non-default timeout for the `credential_process` with another -constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To -set a one minute timeout: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentialsTimeout( - "/path/to/command", - time.Duration(500) * time.Millisecond) - -If you need more control, you can set any configurable options in the -credentials using one or more option functions. For example, you can set a two -minute timeout, a credential duration of 60 minutes, and a maximum stdout -buffer size of 2k. - - creds := processcreds.NewCredentials( - "/path/to/command", - func(opt *ProcessProvider) { - opt.Timeout = time.Duration(2) * time.Minute - opt.Duration = time.Duration(60) * time.Minute - opt.MaxBufSize = 2048 - }) - -You can also use your own `exec.Cmd`: - - // Create an exec.Cmd - myCommand := exec.Command("/path/to/command") - - // Create credentials using your exec.Cmd and custom timeout - creds := processcreds.NewCredentialsCommand( - myCommand, - func(opt *processcreds.ProcessProvider) { - opt.Timeout = time.Duration(1) * time.Second - }) -*/ -package processcreds - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. 
- ProviderName = `ProcessProvider` - - // ErrCodeProcessProviderParse error parsing process output - ErrCodeProcessProviderParse = "ProcessProviderParseError" - - // ErrCodeProcessProviderVersion version error in output - ErrCodeProcessProviderVersion = "ProcessProviderVersionError" - - // ErrCodeProcessProviderRequired required attribute missing in output - ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" - - // ErrCodeProcessProviderExecution execution of command failed - ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" - - // errMsgProcessProviderTimeout process took longer than allowed - errMsgProcessProviderTimeout = "credential process timed out" - - // errMsgProcessProviderProcess process error - errMsgProcessProviderProcess = "error in credential_process" - - // errMsgProcessProviderParse problem parsing output - errMsgProcessProviderParse = "parse failed of credential_process output" - - // errMsgProcessProviderVersion version error in output - errMsgProcessProviderVersion = "wrong version in process output (not 1)" - - // errMsgProcessProviderMissKey missing access key id in output - errMsgProcessProviderMissKey = "missing AccessKeyId in process output" - - // errMsgProcessProviderMissSecret missing secret acess key in output - errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" - - // errMsgProcessProviderPrepareCmd prepare of command failed - errMsgProcessProviderPrepareCmd = "failed to prepare command" - - // errMsgProcessProviderEmptyCmd command must not be empty - errMsgProcessProviderEmptyCmd = "command must not be empty" - - // errMsgProcessProviderPipe failed to initialize pipe - errMsgProcessProviderPipe = "failed to initialize pipe" - - // DefaultDuration is the default amount of time in minutes that the - // credentials will be valid for. - DefaultDuration = time.Duration(15) * time.Minute - - // DefaultBufSize limits buffer size from growing to an enormous - // amount due to a faulty process. - DefaultBufSize = int(8 * sdkio.KibiByte) - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProcessProvider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type ProcessProvider struct { - staticCreds bool - credentials.Expiry - originalCommand []string - - // Expiry duration of the credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // A string representing an os command that should return a JSON with - // credential information. - command *exec.Cmd - - // MaxBufSize limits memory usage from growing to an enormous - // amount due to a faulty process. - MaxBufSize int - - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// ProcessProvider. The credentials will expire every 15 minutes by default. 
-func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: exec.Command(command), - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsTimeout returns a pointer to a new Credentials object with -// the specified command and timeout, and default duration and max buffer size. -func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { - p := NewCredentials(command, func(opt *ProcessProvider) { - opt.Timeout = timeout - }) - - return p -} - -// NewCredentialsCommand returns a pointer to a new Credentials object with -// the specified command, and default timeout, duration and max buffer size. -func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: command, - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - SessionToken string - Expiration *time.Time -} - -// Retrieve executes the 'credential_process' and returns the credentials. -func (p *ProcessProvider) Retrieve() (credentials.Value, error) { - out, err := p.executeCredentialProcess() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // Serialize and validate response - resp := &credentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderParse, - fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), - err) - } - - if resp.Version != 1 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderVersion, - errMsgProcessProviderVersion, - nil) - } - - if len(resp.AccessKeyID) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissKey, - nil) - } - - if len(resp.SecretAccessKey) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissSecret, - nil) - } - - // Handle expiration - p.staticCreds = resp.Expiration == nil - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } - - return credentials.Value{ - ProviderName: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - }, nil -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *ProcessProvider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// prepareCommand prepares the command to be executed. 
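// Editorial sketch, not part of the upstream file: a stand-in credential
// helper that prints the JSON shape validated by Retrieve above -- Version
// must be 1, AccessKeyId and SecretAccessKey are required, SessionToken and
// Expiration are optional (omitting Expiration yields static credentials).
// All key values are dummies.
package main

import (
    "encoding/json"
    "os"
    "time"
)

func main() {
    _ = json.NewEncoder(os.Stdout).Encode(map[string]interface{}{
        "Version":         1,
        "AccessKeyId":     "AKID",
        "SecretAccessKey": "SECRET",
        "SessionToken":    "TOKEN",
        // time.Time marshals to RFC 3339, which the response decoder accepts.
        "Expiration": time.Now().Add(15 * time.Minute).UTC(),
    })
}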
-func (p *ProcessProvider) prepareCommand() error { - - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(p.originalCommand) == 0 { - p.originalCommand = make([]string, len(p.command.Args)) - copy(p.originalCommand, p.command.Args) - - // check for empty command because it succeeds - if len(strings.TrimSpace(p.originalCommand[0])) < 1 { - return awserr.New( - ErrCodeProcessProviderExecution, - fmt.Sprintf( - "%s: %s", - errMsgProcessProviderPrepareCmd, - errMsgProcessProviderEmptyCmd), - nil) - } - } - - cmdArgs = append(cmdArgs, p.originalCommand...) - p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) - p.command.Env = os.Environ() - - return nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { - - if err := p.prepareCommand(); err != nil { - return nil, err - } - - // Setup the pipes - outReadPipe, outWritePipe, err := os.Pipe() - if err != nil { - return nil, awserr.New( - ErrCodeProcessProviderExecution, - errMsgProcessProviderPipe, - err) - } - - p.command.Stderr = os.Stderr // display stderr on console for MFA - p.command.Stdout = outWritePipe // get creds json on process's stdout - p.command.Stdin = os.Stdin // enable stdin for MFA - - output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) - - stdoutCh := make(chan error, 1) - go readInput( - io.LimitReader(outReadPipe, int64(p.MaxBufSize)), - output, - stdoutCh) - - execCh := make(chan error, 1) - go executeCommand(*p.command, execCh) - - finished := false - var errors []error - for !finished { - select { - case readError := <-stdoutCh: - errors = appendError(errors, readError) - finished = true - case execError := <-execCh: - err := outWritePipe.Close() - errors = appendError(errors, err) - errors = appendError(errors, execError) - if errors != nil { - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderProcess, - errors) - } - case <-time.After(p.Timeout): - finished = true - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderTimeout, - errors) // errors can be nil - } - } - - out := output.Bytes() - - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) - } - - return out, nil -} - -// appendError conveniently checks for nil before appending slice -func appendError(errors []error, err error) []error { - if err != nil { - return append(errors, err) - } - return errors -} - -func executeCommand(cmd exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} - -func readInput(r io.Reader, w io.Writer, read chan error) { - tee := io.TeeReader(r, w) - - _, err := ioutil.ReadAll(tee) - - if err == io.EOF { - err = nil - } - - read <- err // will only arrive here when write end of pipe is closed -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index e155149581..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,150 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/internal/ini" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
-func loadProfile(filename, profile string) (Value, error) { - config, err := ini.OpenFile(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - - iniProfile, ok := config.GetSection(profile) - if !ok { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) - } - - id := iniProfile.String("aws_access_key_id") - if len(id) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - nil) - } - - secret := iniProfile.String("aws_secret_access_key") - if len(secret) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.String("aws_session_token") - - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if len(p.Filename) != 0 { - return p.Filename, nil - } - - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { - return p.Filename, nil - } - - if home := shareddefaults.UserHomeDir(); len(home) == 0 { - // Backwards compatibility of home directly not found error being returned. - // This error is too verbose, failure when opening the file would of been - // a better error to return. - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = shareddefaults.SharedCredentialsFilename() - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". -func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index cbba1e3d56..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,57 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. Token is only required -// for temporary security credentials retrieved via STS, otherwise an empty -// string can be passed for this parameter. 
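// Editorial sketch, not part of the upstream file: the simplest wiring of the
// static provider. The empty third argument is the session token, which is
// only needed for temporary STS credentials; the key values are dummies.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

    // Static values never expire, so IsExpired stays false after the first Get.
    v, err := creds.Get()
    fmt.Println(v.AccessKeyID, err)
}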
-func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object -// wrapping the static credentials value provide. Same as NewStaticCredentials -// but takes the creds Value instead of individual fields -func NewStaticCredentialsFromCreds(creds Value) *Credentials { - return NewCredentials(&StaticProvider{Value: creds}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 6846ef6f80..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,363 +0,0 @@ -/* -Package stscreds are credential Providers to retrieve STS AWS credentials. - -STS provides multiple ways to retrieve credentials which can be used when making -future AWS service API operation calls. - -The SDK will ensure that per instance of credentials.Credentials all requests -to refresh the credentials will be synchronized. But, the SDK is unable to -ensure synchronous usage of the AssumeRoleProvider if the value is shared -between multiple Credentials, Sessions or service clients. - -Assume Role - -To assume an IAM role using STS with the SDK you can create a new Credentials -with the SDKs's stscreds package. - - // Initial credentials loaded from SDK's default credential chain. Such as - // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - sess := session.Must(session.NewSession()) - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. - creds := stscreds.NewCredentials(sess, "myRoleArn") - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with static MFA Token - -To assume an IAM role with a MFA token you can either specify a MFA token code -directly or provide a function to prompt the user each time the credentials -need to refresh the role's credentials. Specifying the TokenCode should be used -for short lived operations that will not need to be refreshed, and when you do -not want to have direct control over the user provides their MFA token. - -With TokenCode the AssumeRoleProvider will be not be able to refresh the role's -credentials. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN using the MFA token code provided. 
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenCode = aws.String("00000000") - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with MFA Token Provider - -To assume an IAM role with MFA for longer running tasks where the credentials -may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -will allow the credential provider to prompt for new MFA token code when the -role's credentials need to be refreshed. - -The StdinTokenProvider function is available to prompt on stdin to retrieve -the MFA token code from the user. You can also implement custom prompts by -satisfing the TokenProvider function signature. - -Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -have undesirable results as the StdinTokenProvider will not be synchronized. A -single Credentials with an AssumeRoleProvider can be shared safely. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. - creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenProvider = stscreds.StdinTokenProvider - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -*/ -package stscreds - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" - "github.com/aws/aws-sdk-go/service/sts" -) - -// StdinTokenProvider will prompt on stderr and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) -} - -type assumeRolerWithContext interface { - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. 
-// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - credentials.Expiry - - // STS client to make assume role request with. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. - Tags []*sts.Tag - - // A list of keys for session tags that you want to set as transitive. - // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. - TransitiveTagKeys []*string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The ARNs of IAM managed policies you want to use as managed session policies. - // The policies must exist in the same account as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*sts.PolicyDescriptorType - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). 
- SerialNumber *string - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // If SerialNumber is set and neither TokenCode nor TokenProvider are also - // set an error will be returned. - TokenCode *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is also set and - // TokenCode is not set. - // - // If both TokenCode and TokenProvider is set, TokenProvider will be used and - // TokenCode is ignored. - TokenProvider func() (string, error) - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // MaxJitterFrac reduces the effective Duration of each credential requested - // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must - // have a value between 0 and 1. Any other value may lead to expected behavior. - // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // - // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the - // AssumeRole call will be made with an arbitrary Duration between 27m and - // 30m. - // - // MaxJitterFrac should not be negative. - MaxJitterFrac float64 -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes a Config provider to create the STS client. The ConfigProvider is -// satisfied by the session.Session type. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: sts.New(c), - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes an AssumeRoler which can be satisfied by the STS client. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. 
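// Editorial sketch, not part of the upstream file: assuming a role with a
// custom session name, a 30-minute duration and a small expiry window via the
// option-func pattern both constructors accept. The role ARN and external ID
// are placeholders.
package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess := session.Must(session.NewSession())

    creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role",
        func(p *stscreds.AssumeRoleProvider) {
            p.RoleSessionName = "example-session"
            p.Duration = 30 * time.Minute
            p.ExpiryWindow = 30 * time.Second
            p.ExternalID = aws.String("example-external-id")
        })

    v, err := creds.Get()
    fmt.Println(v.ProviderName, err)
}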
-func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: svc, - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - // Apply defaults where parameters are not set. - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. - p.Duration = DefaultDuration - } - jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - Tags: p.Tags, - PolicyArns: p.PolicyArns, - TransitiveTagKeys: p.TransitiveTagKeys, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - input.TokenCode = aws.String(code) - } else { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - var roleOutput *sts.AssumeRoleOutput - var err error - - if c, ok := p.Client.(assumeRolerWithContext); ok { - roleOutput, err = c.AssumeRoleWithContext(ctx, input) - } else { - roleOutput, err = p.Client.AssumeRole(input) - } - - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // We will proactively generate new credentials before they expire. 
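    // Editorial note (not upstream code): SetExpiration, from the embedded
    // credentials.Expiry, stores the STS-reported expiration minus ExpiryWindow,
    // so IsExpired flips to true that much earlier and the wrapping Credentials
    // refreshes before the assumed-role token actually lapses.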
- p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: *roleOutput.Credentials.AccessKeyId, - SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, - SessionToken: *roleOutput.Credentials.SessionToken, - ProviderName: ProviderName, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index 6feb262b2f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,135 +0,0 @@ -package stscreds - -import ( - "fmt" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" -) - -const ( - // ErrCodeWebIdentity will be used as an error code when constructing - // a new error to be returned during session creation or retrieval. - ErrCodeWebIdentity = "WebIdentityErr" - - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// now is used to return a time.Time object representing -// the current time. This can be used to easily test and -// compare test values. -var now = time.Now - -// TokenFetcher shuold return WebIdentity token bytes or an error -type TokenFetcher interface { - FetchToken(credentials.Context) ([]byte, error) -} - -// FetchTokenPath is a path to a WebIdentity token file -type FetchTokenPath string - -// FetchToken returns a token by reading from the filesystem -func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { - data, err := ioutil.ReadFile(string(f)) - if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", f) - return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) - } - return data, nil -} - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - credentials.Expiry - PolicyArns []*sts.PolicyDescriptorType - - client stsiface.STSAPI - ExpiryWindow time.Duration - - tokenFetcher TokenFetcher - roleARN string - roleSessionName string -} - -// NewWebIdentityCredentials will return a new set of credentials with a given -// configuration, role arn, and token file path. 
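For illustration, a minimal sketch of how the AssumeRoleProvider removed above is typically consumed through NewCredentials, based on the field and constructor comments in this hunk; the role ARN, MFA serial number, and the S3 client are placeholder assumptions.

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// Wrap the provider in a Credentials value; the options configure MFA,
	// the expiry window, and request jitter as the field comments describe.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.SerialNumber = aws.String("arn:aws:iam::123456789012:mfa/example-user")
			p.TokenProvider = stscreds.StdinTokenProvider
			p.ExpiryWindow = 10 * time.Second
			p.MaxJitterFrac = 0.1
		})

	// Any client built with these credentials refreshes them as needed.
	_ = s3.New(sess, &aws.Config{Credentials: creds})
}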
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { - svc := sts.New(c) - p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) - return credentials.NewCredentials(p) -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI -func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) -} - -// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI and a TokenFetcher -func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ - client: svc, - tokenFetcher: tokenFetcher, - roleARN: roleARN, - roleSessionName: roleSessionName, - } -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - b, err := p.tokenFetcher.FetchToken(ctx) - if err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) - } - - sessionName := p.roleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(now().UnixNano(), 10) - } - req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ - PolicyArns: p.PolicyArns, - RoleArn: &p.roleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - }) - - req.SetContext(ctx) - - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. - req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) - if err := req.Send(); err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) - } - - p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) - - value := credentials.Value{ - AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), - SessionToken: aws.StringValue(resp.Credentials.SessionToken), - ProviderName: WebIdentityProviderName, - } - return value, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go deleted file mode 100644 index 25a66d1dda..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package csm provides the Client Side Monitoring (CSM) client which enables -// sending metrics via UDP connection to the CSM agent. This package provides -// control options, and configuration for the CSM client. 
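Similarly, a hedged sketch of assuming a role from an OIDC token file with the WebIdentityRoleProvider removed above; the role ARN, session name, and token path are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	// The provider exchanges the token file's contents for temporary
	// credentials via AssumeRoleWithWebIdentity, retrying invalid-token errors.
	p := stscreds.NewWebIdentityRoleProvider(
		sts.New(sess),
		"arn:aws:iam::123456789012:role/example",
		"example-session",
		"/var/run/secrets/tokens/oidc-token",
	)

	v, err := credentials.NewCredentials(p).Get()
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials from:", v.ProviderName)
}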
The client can be -// controlled manually, or automatically via the SDK's Session configuration. -// -// Enabling CSM client via SDK's Session configuration -// -// The CSM client can be enabled automatically via SDK's Session configuration. -// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT -// environment variable is set to a non-empty value. -// -// The configuration options for the CSM client via the SDK's session -// configuration are: -// -// * AWS_CSM_PORT= -// The port number the CSM agent will receive metrics on. -// -// * AWS_CSM_HOST= -// The hostname, or IP address the CSM agent will receive metrics on. -// Without port number. -// -// Manually enabling the CSM client -// -// The CSM client can be started, paused, and resumed manually. The Start -// function will enable the CSM client to publish metrics to the CSM agent. It -// is safe to call Start concurrently, but if Start is called additional times -// with different ClientID or address it will panic. -// -// r, err := csm.Start("clientID", ":31000") -// if err != nil { -// panic(fmt.Errorf("failed starting CSM: %v", err)) -// } -// -// When controlling the CSM client manually, you must also inject its request -// handlers into the SDK's Session configuration for the SDK's API clients to -// publish metrics. -// -// sess, err := session.NewSession(&aws.Config{}) -// if err != nil { -// panic(fmt.Errorf("failed loading session: %v", err)) -// } -// -// // Add CSM client's metric publishing request handlers to the SDK's -// // Session Configuration. -// r.InjectHandlers(&sess.Handlers) -// -// Controlling CSM client -// -// Once the CSM client has been enabled the Get function will return a Reporter -// value that you can use to pause and resume the metrics published to the CSM -// agent. If Get function is called before the reporter is enabled with the -// Start function or via SDK's Session configuration nil will be returned. -// -// The Pause method can be called to stop the CSM client publishing metrics to -// the CSM agent. The Continue method will resume metric publishing. -// -// // Get the CSM client Reporter. -// r := csm.Get() -// -// // Will pause monitoring -// r.Pause() -// resp, err = client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -// -// // Resume monitoring -// r.Continue() -package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go deleted file mode 100644 index 4b19e2800e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ /dev/null @@ -1,89 +0,0 @@ -package csm - -import ( - "fmt" - "strings" - "sync" -) - -var ( - lock sync.Mutex -) - -const ( - // DefaultPort is used when no port is specified. - DefaultPort = "31000" - - // DefaultHost is the host that will be used when none is specified. - DefaultHost = "127.0.0.1" -) - -// AddressWithDefaults returns a CSM address built from the host and port -// values. If the host or port is not set, default values will be used -// instead. If host is "localhost" it will be replaced with "127.0.0.1". 
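A short sketch of the manual enablement flow that the removed csm package documentation describes; the client ID and the 127.0.0.1:31000 address simply mirror the documented defaults.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Start the metric listener once; calling Start again with a different
	// client ID or address panics, as the comments above note.
	r, err := csm.Start("clientID", "127.0.0.1:31000")
	if err != nil {
		panic(fmt.Errorf("failed starting CSM: %v", err))
	}

	sess := session.Must(session.NewSession())
	// Inject the publishing handlers so API clients report their metrics.
	r.InjectHandlers(&sess.Handlers)

	_ = s3.New(sess)
}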
-func AddressWithDefaults(host, port string) string { - if len(host) == 0 || strings.EqualFold(host, "localhost") { - host = DefaultHost - } - - if len(port) == 0 { - port = DefaultPort - } - - // Only IP6 host can contain a colon - if strings.Contains(host, ":") { - return "[" + host + "]:" + port - } - - return host + ":" + port -} - -// Start will start a long running go routine to capture -// client side metrics. Calling start multiple time will only -// start the metric listener once and will panic if a different -// client ID or port is passed in. -// -// r, err := csm.Start("clientID", "127.0.0.1:31000") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// sess := session.NewSession() -// r.InjectHandlers(sess.Handlers) -// -// svc := s3.New(sess) -// out, err := svc.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -func Start(clientID string, url string) (*Reporter, error) { - lock.Lock() - defer lock.Unlock() - - if sender == nil { - sender = newReporter(clientID, url) - } else { - if sender.clientID != clientID { - panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) - } - - if sender.url != url { - panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) - } - } - - if err := connect(url); err != nil { - sender = nil - return nil, err - } - - return sender, nil -} - -// Get will return a reporter if one exists, if one does not exist, nil will -// be returned. -func Get() *Reporter { - lock.Lock() - defer lock.Unlock() - - return sender -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go deleted file mode 100644 index 5bacc791a1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ /dev/null @@ -1,109 +0,0 @@ -package csm - -import ( - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" -) - -type metricTime time.Time - -func (t metricTime) MarshalJSON() ([]byte, error) { - ns := time.Duration(time.Time(t).UnixNano()) - return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil -} - -type metric struct { - ClientID *string `json:"ClientId,omitempty"` - API *string `json:"Api,omitempty"` - Service *string `json:"Service,omitempty"` - Timestamp *metricTime `json:"Timestamp,omitempty"` - Type *string `json:"Type,omitempty"` - Version *int `json:"Version,omitempty"` - - AttemptCount *int `json:"AttemptCount,omitempty"` - Latency *int `json:"Latency,omitempty"` - - Fqdn *string `json:"Fqdn,omitempty"` - UserAgent *string `json:"UserAgent,omitempty"` - AttemptLatency *int `json:"AttemptLatency,omitempty"` - - SessionToken *string `json:"SessionToken,omitempty"` - Region *string `json:"Region,omitempty"` - AccessKey *string `json:"AccessKey,omitempty"` - HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` - XAmzID2 *string `json:"XAmzId2,omitempty"` - XAmzRequestID *string `json:"XAmznRequestId,omitempty"` - - AWSException *string `json:"AwsException,omitempty"` - AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` - SDKException *string `json:"SdkException,omitempty"` - SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` - - FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` - FinalAWSException *string `json:"FinalAwsException,omitempty"` - FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` - FinalSDKException *string 
`json:"FinalSdkException,omitempty"` - FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` - - DestinationIP *string `json:"DestinationIp,omitempty"` - ConnectionReused *int `json:"ConnectionReused,omitempty"` - - AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` - ConnectLatency *int `json:"ConnectLatency,omitempty"` - RequestLatency *int `json:"RequestLatency,omitempty"` - DNSLatency *int `json:"DnsLatency,omitempty"` - TCPLatency *int `json:"TcpLatency,omitempty"` - SSLLatency *int `json:"SslLatency,omitempty"` - - MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` -} - -func (m *metric) TruncateFields() { - m.ClientID = truncateString(m.ClientID, 255) - m.UserAgent = truncateString(m.UserAgent, 256) - - m.AWSException = truncateString(m.AWSException, 128) - m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) - - m.SDKException = truncateString(m.SDKException, 128) - m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) - - m.FinalAWSException = truncateString(m.FinalAWSException, 128) - m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) - - m.FinalSDKException = truncateString(m.FinalSDKException, 128) - m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) -} - -func truncateString(v *string, l int) *string { - if v != nil && len(*v) > l { - nv := (*v)[:l] - return &nv - } - - return v -} - -func (m *metric) SetException(e metricException) { - switch te := e.(type) { - case awsException: - m.AWSException = aws.String(te.exception) - m.AWSExceptionMessage = aws.String(te.message) - case sdkException: - m.SDKException = aws.String(te.exception) - m.SDKExceptionMessage = aws.String(te.message) - } -} - -func (m *metric) SetFinalException(e metricException) { - switch te := e.(type) { - case awsException: - m.FinalAWSException = aws.String(te.exception) - m.FinalAWSExceptionMessage = aws.String(te.message) - case sdkException: - m.FinalSDKException = aws.String(te.exception) - m.FinalSDKExceptionMessage = aws.String(te.message) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go deleted file mode 100644 index 82a3e345e9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ /dev/null @@ -1,55 +0,0 @@ -package csm - -import ( - "sync/atomic" -) - -const ( - runningEnum = iota - pausedEnum -) - -var ( - // MetricsChannelSize of metrics to hold in the channel - MetricsChannelSize = 100 -) - -type metricChan struct { - ch chan metric - paused *int64 -} - -func newMetricChan(size int) metricChan { - return metricChan{ - ch: make(chan metric, size), - paused: new(int64), - } -} - -func (ch *metricChan) Pause() { - atomic.StoreInt64(ch.paused, pausedEnum) -} - -func (ch *metricChan) Continue() { - atomic.StoreInt64(ch.paused, runningEnum) -} - -func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(ch.paused) - return v == pausedEnum -} - -// Push will push metrics to the metric channel if the channel -// is not paused -func (ch *metricChan) Push(m metric) bool { - if ch.IsPaused() { - return false - } - - select { - case ch.ch <- m: - return true - default: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go deleted file mode 100644 index 54a99280ce..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go +++ /dev/null @@ 
-1,26 +0,0 @@ -package csm - -type metricException interface { - Exception() string - Message() string -} - -type requestException struct { - exception string - message string -} - -func (e requestException) Exception() string { - return e.exception -} -func (e requestException) Message() string { - return e.message -} - -type awsException struct { - requestException -} - -type sdkException struct { - requestException -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go deleted file mode 100644 index 835bcd49cb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ /dev/null @@ -1,264 +0,0 @@ -package csm - -import ( - "encoding/json" - "net" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Reporter will gather metrics of API requests made and -// send those metrics to the CSM endpoint. -type Reporter struct { - clientID string - url string - conn net.Conn - metricsCh metricChan - done chan struct{} -} - -var ( - sender *Reporter -) - -func connect(url string) error { - const network = "udp" - if err := sender.connect(network, url); err != nil { - return err - } - - if sender.done == nil { - sender.done = make(chan struct{}) - go sender.start() - } - - return nil -} - -func newReporter(clientID, url string) *Reporter { - return &Reporter{ - clientID: clientID, - url: url, - metricsCh: newMetricChan(MetricsChannelSize), - } -} - -func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - creds, _ := r.Config.Credentials.Get() - - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Region: r.Config.Region, - Type: aws.String("ApiCallAttempt"), - Version: aws.Int(1), - - XAmzRequestID: aws.String(r.RequestID), - - AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), - AccessKey: aws.String(creds.AccessKeyID), - } - - if r.HTTPResponse != nil { - m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetException(getMetricException(awserr)) - } - } - - m.TruncateFields() - rep.metricsCh.Push(m) -} - -func getMetricException(err awserr.Error) metricException { - msg := err.Error() - code := err.Code() - - switch code { - case request.ErrCodeRequestError, - request.ErrCodeSerialization, - request.CanceledErrorCode: - return sdkException{ - requestException{exception: code, message: msg}, - } - default: - return awsException{ - requestException{exception: code, message: msg}, - } - } -} - -func (rep *Reporter) sendAPICallMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Region: r.Config.Region, - Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), - MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), - } - - if r.HTTPResponse != nil { - 
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetFinalException(getMetricException(awserr)) - } - } - - m.TruncateFields() - - // TODO: Probably want to figure something out for logging dropped - // metrics - rep.metricsCh.Push(m) -} - -func (rep *Reporter) connect(network, url string) error { - if rep.conn != nil { - rep.conn.Close() - } - - conn, err := net.Dial(network, url) - if err != nil { - return awserr.New("UDPError", "Could not connect", err) - } - - rep.conn = conn - - return nil -} - -func (rep *Reporter) close() { - if rep.done != nil { - close(rep.done) - } - - rep.metricsCh.Pause() -} - -func (rep *Reporter) start() { - defer func() { - rep.metricsCh.Pause() - }() - - for { - select { - case <-rep.done: - rep.done = nil - return - case m := <-rep.metricsCh.ch: - // TODO: What to do with this error? Probably should just log - b, err := json.Marshal(m) - if err != nil { - continue - } - - rep.conn.Write(b) - } - } -} - -// Pause will pause the metric channel preventing any new metrics from being -// added. It is safe to call concurrently with other calls to Pause, but if -// called concurently with Continue can lead to unexpected state. -func (rep *Reporter) Pause() { - lock.Lock() - defer lock.Unlock() - - if rep == nil { - return - } - - rep.close() -} - -// Continue will reopen the metric channel and allow for monitoring to be -// resumed. It is safe to call concurrently with other calls to Continue, but -// if called concurently with Pause can lead to unexpected state. -func (rep *Reporter) Continue() { - lock.Lock() - defer lock.Unlock() - if rep == nil { - return - } - - if !rep.metricsCh.IsPaused() { - return - } - - rep.metricsCh.Continue() -} - -// Client side metric handler names -const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" -) - -// InjectHandlers will will enable client side metrics and inject the proper -// handlers to handle how metrics are sent. -// -// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers -// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). -// -// // Start must be called in order to inject the correct handlers -// r, err := csm.Start("clientID", "127.0.0.1:8094") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// -// sess := session.NewSession() -// r.InjectHandlers(&sess.Handlers) -// -// // create a new service client with our client side metric session -// svc := s3.New(sess) -func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { - if rep == nil { - return - } - - handlers.Complete.PushFrontNamed(request.NamedHandler{ - Name: APICallMetricHandlerName, - Fn: rep.sendAPICallMetric, - }) - - handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ - Name: APICallAttemptMetricHandlerName, - Fn: rep.sendAPICallAttemptMetric, - }) -} - -// boolIntValue return 1 for true and 0 for false. -func boolIntValue(b bool) int { - if b { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 23bb639e01..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. 
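A brief sketch of pausing and resuming metric publication with the Reporter removed above, assuming CSM was previously enabled via Start or the session environment configuration.

package main

import "github.com/aws/aws-sdk-go/aws/csm"

func main() {
	// Get returns nil until the reporter has been enabled.
	r := csm.Get()
	if r == nil {
		return
	}

	r.Pause() // stop publishing metrics to the agent
	// ... calls made here are not measured ...
	r.Continue() // resume publishing
}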
-// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// A Defaults provides a collection of default values for SDK clients. -type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithEndpointResolver(endpoints.DefaultResolver()) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: CredProviders(cfg, handlers), - }) -} - -// CredProviders returns the slice of providers used in -// the default credential chain. 
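A small sketch of the removed defaults package as its comments describe it: fetching the SDK defaults and rebuilding the default credential chain; printing the region is only for illustration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Get returns a Config and Handlers pre-wired with the default
	// credential chain (environment, shared credentials file, remote roles).
	def := defaults.Get()

	// CredChain can rebuild the chain for an existing config/handlers pair,
	// e.g. after resetting a client's configuration.
	def.Config.Credentials = defaults.CredChain(def.Config, def.Handlers)

	region := "(unset)"
	if def.Config.Region != nil && *def.Config.Region != "" {
		region = *def.Config.Region
	}
	fmt.Println("default region:", region)
}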
-// -// For applications that need to use some other provider (for example use -// different environment variables for legacy reasons) but still fall back -// on the default chain of providers. This allows that default chaint to be -// automatically updated -func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { - return []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - } -} - -const ( - httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" -) - -// RemoteCredProvider returns a credentials provider for the default remote -// endpoints such as EC2 or ECS Roles. -func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { - return localHTTPCredProvider(cfg, handlers, u) - } - - if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) - return httpCredProvider(cfg, handlers, u) - } - - return ec2RoleProvider(cfg, handlers) -} - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - var errMsg string - - parsed, err := url.Parse(u) - if err != nil { - errMsg = fmt.Sprintf("invalid URL, %v", err) - } else { - host := aws.URLHostname(parsed) - if len(host) == 0 { - errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) - } - } - - if len(errMsg) > 0 { - if cfg.Logger != nil { - cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) - } - return credentials.ErrorProvider{ - Err: awserr.New("CredentialsEndpointError", errMsg, err), - ProviderName: endpointcreds.ProviderName, - } - } - - return httpCredProvider(cfg, handlers, u) -} - -func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - return endpointcreds.NewProviderClient(cfg, handlers, u, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) - }, - ) -} - -func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - resolver := cfg.EndpointResolver - if resolver == nil { - resolver = endpoints.DefaultResolver() - } - - e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") - return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), - ExpiryWindow: 5 * time.Minute, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go deleted file mode 100644 index ca0ee1dcc7..0000000000 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return shareddefaults.SharedCredentialsFilename() -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return shareddefaults.SharedConfigFilename() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb616184..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. 
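A compact sketch of the conversion helpers and explicit HTTP client that the removed aws/doc.go recommends; the 30-second timeout is an arbitrary example value.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Value <-> pointer helpers for scalars and slices.
	strPtr := aws.String("my string")
	fmt.Println(aws.StringValue(strPtr))
	fmt.Println(aws.StringValueSlice(aws.StringSlice([]string{"Go", "Gophers", "Go"})))

	// Prefer an explicit HTTP client over mutating http.DefaultClient.
	sess := session.Must(session.NewSession(&aws.Config{
		HTTPClient: &http.Client{Timeout: 30 * time.Second},
	}))
	_ = sess
}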
-package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index a716c021cf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,250 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// getToken uses the duration to return a token for EC2 metadata service, -// or an error if the request failed. -func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { - op := &request.Operation{ - Name: "GetToken", - HTTPMethod: "PUT", - HTTPPath: "/api/token", - } - - var output tokenOutput - req := c.NewRequest(op, nil, &output) - req.SetContext(ctx) - - // remove the fetch token handler from the request handlers to avoid infinite recursion - req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) - - // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. - req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) - - ttl := strconv.FormatInt(int64(duration/time.Second), 10) - req.HTTPRequest.Header.Set(ttlHeader, ttl) - - err := req.Send() - - // Errors with bad request status should be returned. - if err != nil { - err = awserr.NewRequestFailure( - awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), - req.HTTPResponse.StatusCode, req.RequestID) - } - - return output, err -} - -// GetMetadata uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - return c.GetMetadataWithContext(aws.BackgroundContext(), p) -} - -// GetMetadataWithContext uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/meta-data", p), - } - output := &metadataOutput{} - - req := c.NewRequest(op, nil, output) - - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - return c.GetUserDataWithContext(aws.BackgroundContext()) -} - -// GetUserDataWithContext returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: "/user-data", - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. 
The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) -} - -// GetDynamicDataWithContext uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) -} - -// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - return c.IAMInfoWithContext(aws.BackgroundContext()) -} - -// IAMInfoWithContext retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { - resp, err := c.GetMetadataWithContext(ctx, "iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - return c.RegionWithContext(aws.BackgroundContext()) -} - -// RegionWithContext returns the region the instance is running in. 
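A minimal sketch of querying the removed ec2metadata client through the accessors above; it is only meaningful on an EC2 instance (or against an IMDS stand-in).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	if !svc.Available() {
		fmt.Println("IMDS not reachable")
		return
	}

	region, err := svc.Region()
	if err != nil {
		panic(err)
	}
	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		panic(err)
	}
	fmt.Println(region, doc.InstanceID, doc.AvailabilityZone)
}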
-func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) - if err != nil { - return "", err - } - // extract region from the ec2InstanceIdentityDocument - region := ec2InstanceIdentityDocument.Region - if len(region) == 0 { - return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) - } - // returns region - return region, nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) Available() bool { - return c.AvailableWithContext(aws.BackgroundContext()) -} - -// AvailableWithContext returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { - if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index b8b2940d74..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environment variable is set to true, (case insensitive). -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "os" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ServiceName is the name of the service. 
- ServiceName = "ec2metadata" - disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Headers for Token and TTL - ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" - tokenHeader = "x-aws-ec2-metadata-token" - - // Named Handler constants - fetchTokenHandlerName = "FetchTokenHandler" - unmarshalMetadataHandlerName = "unmarshalMetadataHandler" - unmarshalTokenHandlerName = "unmarshalTokenHandler" - enableTokenProviderHandlerName = "enableTokenProviderHandler" - - // TTL constants - defaultTTL = 21600 * time.Second - ttlExpirationWindow = 30 * time.Second -) - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 1 * time.Second, - } - // max number of retries on the client operation - cfg.MaxRetries = aws.Int(2) - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - // token provider instance - tp := newTokenProvider(svc, defaultTTL) - - // NamedHandler for fetching token - svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ - Name: fetchTokenHandlerName, - Fn: tp.fetchTokenHandler, - }) - // NamedHandler for enabling token provider - svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ - Name: enableTokenProviderHandlerName, - Fn: tp.enableTokenProviderHandler, - }) - - svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This short-circuits the service's functionality to always fail to send - // requests. 
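A short sketch of the AWS_EC2_METADATA_DISABLED switch handled just below; setting it before constructing the client swaps in a send handler that always fails, so Available reports false.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Must be set before the client is constructed; NewClient reads it once.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Every IMDS call now fails fast with a canceled-request error.
	fmt.Println("IMDS available:", svc.Available())
}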
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(request.NamedHandler{ - Name: corehandlers.SendHandler.Name, - Fn: func(r *request.Request) { - r.HTTPResponse = &http.Response{ - Header: http.Header{}, - } - r.Error = awserr.New( - request.CanceledErrorCode, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -type tokenOutput struct { - Token string - TTL time.Duration -} - -// unmarshal token handler is used to parse the response of a getToken operation -var unmarshalTokenHandler = request.NamedHandler{ - Name: unmarshalTokenHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - v := r.HTTPResponse.Header.Get(ttlHeader) - data, ok := r.Data.(*tokenOutput) - if !ok { - return - } - - data.Token = b.String() - // TTL is in seconds - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, - "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - t := time.Duration(i) * time.Second - data.TTL = t - }, -} - -var unmarshalHandler = request.NamedHandler{ - Name: unmarshalMetadataHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } - }, -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), - r.HTTPResponse.StatusCode, r.RequestID) - return - } - - // Response body format is not consistent between metadata endpoints. 
- // Grab the error message as a string and include that as the source error - r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), - r.HTTPResponse.StatusCode, r.RequestID) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go deleted file mode 100644 index d0a3a020d8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ /dev/null @@ -1,92 +0,0 @@ -package ec2metadata - -import ( - "net/http" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A tokenProvider struct provides access to EC2Metadata client -// and atomic instance of a token, along with configuredTTL for it. -// tokenProvider also provides an atomic flag to disable the -// fetch token operation. -// The disabled member will use 0 as false, and 1 as true. -type tokenProvider struct { - client *EC2Metadata - token atomic.Value - configuredTTL time.Duration - disabled uint32 -} - -// A ec2Token struct helps use of token in EC2 Metadata service ops -type ec2Token struct { - token string - credentials.Expiry -} - -// newTokenProvider provides a pointer to a tokenProvider instance -func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { - return &tokenProvider{client: c, configuredTTL: duration} -} - -// fetchTokenHandler fetches token for EC2Metadata service client by default. -func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { - return - } - - if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - return - } - - output, err := t.client.getToken(r.Context(), t.configuredTTL) - - if err != nil { - - // change the disabled flag on token provider to true, - // when error is request timeout error. - if requestFailureError, ok := err.(awserr.RequestFailure); ok { - switch requestFailureError.StatusCode() { - case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - atomic.StoreUint32(&t.disabled, 1) - case http.StatusBadRequest: - r.Error = requestFailureError - } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } - } - return - } - - newToken := ec2Token{ - token: output.Token, - } - newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) - t.token.Store(newToken) - - // Inject token header to the request. 
- if ec2Token, ok := t.token.Load().(ec2Token); ok { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - } -} - -// enableTokenProviderHandler enables the token provider -func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { - // If the error code status is 401, we enable the token provider - if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && - e.StatusCode() == http.StatusUnauthorized { - atomic.StoreUint32(&t.disabled, 0) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index 654fb1ad52..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,216 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. 
- modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custAddEC2Metadata(p) - custAddS3DualStack(p) - custRegionalS3(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - custFixAppAutoscalingUsGov(p) - } - - return ps, nil -} - -func custAddS3DualStack(p *partition) { - if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { - return - } - - custAddDualstack(p, "s3") - custAddDualstack(p, "s3-control") -} - -func custRegionalS3(p *partition) { - if p.ID != "aws" { - return - } - - service, ok := p.Services["s3"] - if !ok { - return - } - - // If global endpoint already exists no customization needed. - if _, ok := service.Endpoints["aws-global"]; ok { - return - } - - service.PartitionEndpoint = "aws-global" - service.Endpoints["us-east-1"] = endpoint{} - service.Endpoints["aws-global"] = endpoint{ - Hostname: "s3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - } - - p.Services["s3"] = service -} - -func custAddDualstack(p *partition, svcName string) { - s, ok := p.Services[svcName] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services[svcName] = s -} - -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - if e, a := s.Defaults.Hostname, expectHostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - - s.Defaults.Hostname = expectHostname + ".cn" - p.Services[serviceName] = s -} - -func custFixAppAutoscalingUsGov(p *partition) { - if p.ID != "aws-us-gov" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - if a := s.Defaults.CredentialScope.Service; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) - return - } - - if a := s.Defaults.Hostname; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: 
ignoring customization, expected empty hostname, got %s\n", a) - return - } - - s.Defaults.CredentialScope.Service = "application-autoscaling" - s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" - - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index bdf4f71ab0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,9007 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. - AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. - AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. -) - -// AWS Standard partition's regions. -const ( - AfSouth1RegionID = "af-south-1" // Africa (Cape Town). - ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). - EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). - EuSouth1RegionID = "eu-south-1" // Europe (Milan). - EuWest1RegionID = "eu-west-1" // Europe (Ireland). - EuWest2RegionID = "eu-west-2" // Europe (London). - EuWest3RegionID = "eu-west-3" // Europe (Paris). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). -) - -// AWS ISO (US) partition's regions. -const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). 
-// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, - awsisoPartition, - awsisobPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. -func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "af-south-1": region{ - Description: "Africa (Cape Town)", - }, - "ap-east-1": region{ - Description: "Asia Pacific (Hong Kong)", - }, - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "Europe (Frankfurt)", - }, - "eu-north-1": region{ - Description: "Europe (Stockholm)", - }, - "eu-south-1": region{ - Description: "Europe (Milan)", - }, - "eu-west-1": region{ - Description: "Europe (Ireland)", - }, - "eu-west-2": region{ - Description: "Europe (London)", - }, - "eu-west-3": region{ - Description: "Europe (Paris)", - }, - "me-south-1": region{ - Description: "Middle East (Bahrain)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "access-analyzer": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "api.detective": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{ - Hostname: "api.ecr.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - "ap-east-1": endpoint{ - Hostname: "api.ecr.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "ap-northeast-1": endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-south-1": endpoint{ - Hostname: "api.ecr.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.elastic-inference": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", - }, - "ap-northeast-2": endpoint{ - Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", - }, - "eu-west-1": endpoint{ - Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", - }, - "us-east-1": endpoint{ - Hostname: "api.elastic-inference.us-east-1.amazonaws.com", - }, - "us-east-2": endpoint{ - Hostname: "api.elastic-inference.us-east-2.amazonaws.com", - }, - "us-west-2": endpoint{ - Hostname: "api.elastic-inference.us-west-2.amazonaws.com", - }, - }, - }, - "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appmesh": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips": endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - 
"autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "backup": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": 
endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": 
endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar-connections": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: 
"cognito-identity-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "connect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "data.mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dataexchange": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "devicefarm": service{ - 
- Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "discovery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "dms-fips": endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "docdb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - 
Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-west-2", - }, - }, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - 
Hostname: "fips.eks.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-af-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - "fips-ap-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "fips-ap-northeast-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-north-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "fips-eu-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-me-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, 
- "fips-us-east-2": endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: 
"es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ap-northeast-1": endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecast": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecastquery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fsx": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": 
endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "groundstation": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "me-south-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": 
endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "iam-fips": endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": 
endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotevents": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ioteventsdata": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "eu-central-1": endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotthingsgraph": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - 
"eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lakeformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": 
endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "macie": service{ - - Endpoints: endpoints{ - "fips-us-east-1": endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "managedblockchain": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconnect": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": 
endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mq": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - 
Hostname: "mq-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "me-south-1": endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "oidc": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: 
"oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-aws-global": endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "outposts": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: 
credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "portal.sso": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "rds-fips.ca-central-1": endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "rds-fips.us-east-1": endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "rds-fips.us-east-2": endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "rds-fips.us-west-1": endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "rds-fips.us-west-2": endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: 
"redshift-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - 
}, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "aws-global": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - 
SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-west-2-fips": endpoint{ - Hostname: 
"s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "savingsplans": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "savingsplans.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "schemas": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "securityhub": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ 
- Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-3": endpoint{ - Protocols: []string{"https"}, - }, - "me-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "session.qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-aws-global": endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ap-northeast-1": endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ 
- Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ 
- Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "ssm-facade-fips-us-east-1": endpoint{ - Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ssm-facade-fips-us-east-2": endpoint{ - Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "ssm-facade-fips-us-west-1": endpoint{ - Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "ssm-facade-fips-us-west-2": endpoint{ - Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "support": service{ - PartitionEndpoint: 
"aws-global", - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "support.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribestreaming": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-fips": endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "ap-northeast-1": endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: 
"waf-regional.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-ap-east-1": endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "fips-ap-northeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "fips-ap-northeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "fips-ap-south-1": endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "fips-ap-southeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "fips-ap-southeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "fips-ca-central-1": endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-eu-central-1": endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "fips-eu-north-1": endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "fips-eu-west-1": endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "fips-eu-west-2": endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "fips-eu-west-3": endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "fips-me-south-1": endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "fips-sa-east-1": endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. 
-func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "acm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "backup": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: 
endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ - Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "fips-cn-northwest-1": endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - 
Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "route53.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "cn-north-1": 
endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "cn-northwest-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "fips-cn-north-1": endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "support.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US-West)", - }, - }, - Services: services{ - "access-analyzer": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "athena-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: 
[]string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "cloudformation.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "cloudformation.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: 
"cognito-idp-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "ec2.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "ec2.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "email-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "events.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "events.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - 
}, - "fips-us-gov-west-1": endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "glacier.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "glue-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Hostname: "greengrass.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "iam-govcloud-fips": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "kinesis-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "kinesis-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: 
endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "monitoring.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "monitoring.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "fips-aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "outposts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "outposts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "outposts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "polly": service{ - 
- Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "rds.us-gov-east-1": endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "rds.us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "redshift.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "redshift.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "route53resolver": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: 
[]string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "securityhub": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - 
Hostname: "sqs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "sqs.us-gov-west-1.amazonaws.com", - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "ssm-facade-fips-us-gov-east-1": endpoint{ - Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "ssm-facade-fips-us-gov-west-1": endpoint{ - Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-us-gov-global", - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-gov-west-1", - }, - }, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "waf-regional.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). -func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - }, - Services: services{ - "api.ecr": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "directconnect": 
service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - "us-iso-east-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - "us-iso-east-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: 
endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). -func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - "us-isob-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: 
endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - "us-isob-east-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go deleted file mode 100644 index ca8fc828e1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go +++ /dev/null @@ -1,141 +0,0 @@ -package endpoints - -// Service identifiers -// -// Deprecated: Use client package's EndpointsID value instead of these -// ServiceIDs. These IDs are not maintained, and are out of date. -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. 
- ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. 
- MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SagemakerServiceID = "api.sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index 84316b92c0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. 
e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. -// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the session, or service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index ca956e5f12..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,564 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Options provide the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - UseDualStack bool - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. 
- StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool - - // STS Regional Endpoint flag helps with resolving the STS endpoint - STSRegionalEndpoint STSRegionalEndpoint - - // S3 Regional Endpoint flag helps with resolving the S3 endpoint - S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint -} - -// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint -// options. -type STSRegionalEndpoint int - -func (e STSRegionalEndpoint) String() string { - switch e { - case LegacySTSEndpoint: - return "legacy" - case RegionalSTSEndpoint: - return "regional" - case UnsetSTSEndpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. - UnsetSTSEndpoint STSRegionalEndpoint = iota - - // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified - // to use legacy endpoints. - LegacySTSEndpoint - - // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified - // to use regional endpoints. - RegionalSTSEndpoint -) - -// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the STS regional Endpoint flag. -func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacySTSEndpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalSTSEndpoint, nil - default: - return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) - } -} - -// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 -// Regional Endpoint options. -type S3UsEast1RegionalEndpoint int - -func (e S3UsEast1RegionalEndpoint) String() string { - switch e { - case LegacyS3UsEast1Endpoint: - return "legacy" - case RegionalS3UsEast1Endpoint: - return "regional" - case UnsetS3UsEast1Endpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not - // specified. - UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota - - // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use legacy endpoints. - LegacyS3UsEast1Endpoint - - // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use regional endpoints. 
- RegionalS3UsEast1Endpoint -) - -// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the S3 regional Endpoint flag. -func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacyS3UsEast1Endpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalS3UsEast1Endpoint, nil - default: - return UnsetS3UsEast1Endpoint, - fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) - } -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL options. Can be used as a functional -// option when resolving endpoints. -func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. -func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve -// STS endpoint to their regional endpoint, instead of the global endpoint. -func STSRegionalEndpointOption(o *Options) { - o.STSRegionalEndpoint = RegionalSTSEndpoint -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The build in Partition and DefaultResolver return value satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. -func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. -// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions a provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. 
-// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !ok { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id, dnsSuffix string - p *partition -} - -// DNSSuffix returns the base domain name of the partition. -func (p Partition) DNSSuffix() string { return p.dnsSuffix } - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless if -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve a endpoint based on -// the service endpoint ID provided. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. 
-func (p Partition) Regions() map[string]Region { - rs := make(map[string]Region, len(p.p.Regions)) - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := make(map[string]Service, len(p.p.Services)) - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { - if r, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, - desc: r.Description, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Endpoints() map[string]Endpoint { - es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) - for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. 
-func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The endpoint partition - PartitionID string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signing requests. - SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. -type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. 
-func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go deleted file mode 100644 index df75e899ad..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go +++ /dev/null @@ -1,24 +0,0 @@ -package endpoints - -var legacyGlobalRegions = map[string]map[string]struct{}{ - "sts": { - "ap-northeast-1": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, - "sa-east-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {}, - }, - "s3": { - "us-east-1": {}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index eb2ac83c99..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,341 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. 
-func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] - - if hasEndpoint && hasService { - return true - } - - if strictMatch { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func allowLegacyEmptyRegion(service string) bool { - legacy := map[string]struct{}{ - "budgets": {}, - "ce": {}, - "chime": {}, - "cloudfront": {}, - "ec2metadata": {}, - "iam": {}, - "importexport": {}, - "organizations": {}, - "route53": {}, - "sts": {}, - "support": {}, - "waf": {}, - } - - _, allowed := legacy[service] - return allowed -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) - - s, hasService := p.Services[service] - if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { - region = s.PartitionEndpoint - } - - if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || - (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { - if _, ok := legacyGlobalRegions[service][region]; ok { - region = "aws-global" - } - } - - e, hasEndpoint := s.endpointForRegion(region) - if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) - } - - defs := []endpoint{p.Defaults, s.Defaults} - - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es endpoints) []string { - list := make([]string, 0, len(es)) - for k := range es { - list = append(list, k) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` -} - -func 
(s *service) endpointForRegion(region string) (endpoint, bool) { - if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint - } - - if e, ok := s.Endpoints[region]; ok { - return e, true - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return endpoint{}, false -} - -type endpoints map[string]endpoint - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. - SSLCommonName string `json:"sslCommonName"` -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - hostname := e.Hostname - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - region = signingRegion - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - return ResolvedEndpoint{ - URL: u, - PartitionID: partitionID, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack - } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b 
*boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 0fdfcc56e0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,351 +0,0 @@ -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - 
return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" $.Resolver }} - - {{ range $_, $partition := $.Resolver }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} - - {{ template "endpoint resolvers" $.Resolver }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . 
-}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . -}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -endpoints{ - {{ range $id, $endpoint := . -}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index fa06f7a8f8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go deleted file mode 100644 index 91a6f277a7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. 
This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index 6ed15b2ecc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. -func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. -func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors - - // LogDebugWithEventStreamBody states the SDK should log EventStream - // request and response bodys. This should be used to log the EventStream - // wire unmarshaled message content of requests and responses made while - // using the SDK Will also enable LogDebug. - LogDebugWithEventStreamBody -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. 
-type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go deleted file mode 100644 index d9b37f4d32..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ /dev/null @@ -1,18 +0,0 @@ -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - if strings.Contains(err.Error(), "read: connection reset") { - return false - } - - if strings.Contains(err.Error(), "connection reset") || - strings.Contains(err.Error(), "broken pipe") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index e819ab6c0e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,343 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - BuildStream HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalStream HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - CompleteAttempt HandlerList - Complete HandlerList -} - -// Copy returns a copy of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - BuildStream: h.BuildStream.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalStream: h.UnmarshalStream.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - CompleteAttempt: h.CompleteAttempt.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers. 
-func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.BuildStream.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalStream.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.CompleteAttempt.Clear() - h.Complete.Clear() -} - -// IsEmpty returns if there are no handlers in any of the handlerlists. -func (h *Handlers) IsEmpty() bool { - if h.Validate.Len() != 0 { - return false - } - if h.Build.Len() != 0 { - return false - } - if h.BuildStream.Len() != 0 { - return false - } - if h.Send.Len() != 0 { - return false - } - if h.Sign.Len() != 0 { - return false - } - if h.Unmarshal.Len() != 0 { - return false - } - if h.UnmarshalStream.Len() != 0 { - return false - } - if h.UnmarshalMeta.Len() != 0 { - return false - } - if h.UnmarshalError.Len() != 0 { - return false - } - if h.ValidateResponse.Len() != 0 { - return false - } - if h.Retry.Len() != 0 { - return false - } - if h.AfterRetry.Len() != 0 { - return false - } - if h.CompleteAttempt.Len() != 0 { - return false - } - if h.Complete.Len() != 0 { - return false - } - - return true -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. 
- l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// Swap will swap out all handlers matching the name passed in. The matched -// handlers will be swapped in. True is returned if the handlers were swapped. -func (l *HandlerList) Swap(name string, replace NamedHandler) bool { - var swapped bool - - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == name { - l.list[i] = replace - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. 
-// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} - -// WithSetRequestHeaders updates the operation request's HTTP header to contain -// the header key value pairs provided. If the header key already exists in the -// request's HTTP header set, the existing value(s) will be replaced. -func WithSetRequestHeaders(h map[string]string) Option { - return withRequestHeader(h).SetRequestHeaders -} - -type withRequestHeader map[string]string - -func (h withRequestHeader) SetRequestHeaders(r *Request) { - for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f79602b0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index 9370fa50c3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,65 +0,0 @@ -package request - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { - reader := &offsetReader{} - _, err := buf.Seek(offset, sdkio.SeekStart) - if err != nil { - return nil, err - } - - reader.buf = buf - return reader, nil -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. 
-func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { - if err := o.Close(); err != nil { - return nil, err - } - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index d597c6ead5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,698 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeInvalidPresignExpire is returned when the expire time provided to - // presign is invalid - ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given a aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" - - // ErrCodeRequestError is an error preventing the SDK from continuing to - // process the request. - ErrCodeRequestError = "RequestError" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - AttemptTime time.Time - Time time.Time - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - streamingBody io.ReadCloser - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - // Additional API error codes that should be retried. IsErrorRetryable - // will consider these codes in addition to its built in cases. - RetryErrorCodes []string - - // Additional API error codes that should be retried with throttle backoff - // delay. IsErrorThrottle will consider these codes in addition to its - // built in cases. - ThrottleErrorCodes []string - - // A value greater than 0 instructs the request to be signed as Presigned URL - // You should not set this field directly. Instead use Request's - // Presign or PresignRequest methods. - ExpireTime time.Duration - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. 
-type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API operation and -// parameters. -// -// A Retryer should be provided to direct how the request is retried. If -// Retryer is nil, a default no retry value will be used. You can use -// NoOpRetryer in the Client package to disable retry behavior directly. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - if retryer == nil { - retryer = noOpRetryer{} - } - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. -// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// the were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always returns a non-nil context. 
If Request does not have a -// context aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// a in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use use a single Request value for multiple -// requests. A new Request should be created for each API operation request. -// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { - return false - } - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -func fmtAttemptCount(retryCount, maxRetries int) string { - return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - - if aws.IsReaderSeekable(reader) { - var err error - // Get the Bodies current offset so retries will start from the same - // initial position. - r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to determine start of request body", err) - return - } - } - r.ResetBody() -} - -// SetStreamingBody set the reader to be used for the request that will stream -// bytes to the server. Request's Body must not be set to any reader. -func (r *Request) SetStreamingBody(reader io.ReadCloser) { - r.streamingBody = reader - r.SetReaderBody(aws.ReadSeekCloser(reader)) -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expiration -// time of 15 minutes. 
-// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -func (r *Request) Presign(expire time.Duration) (string, error) { - r = r.copy() - - // Presign requires all headers be hoisted. There is no way to retrieve - // the signed headers not hoisted without this. Making the presigned URL - // useless. - r.NotHoist = false - - u, _, err := getPresignedURL(r, expire) - return u, err -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. The expire parameter is only used for -// presigned Amazon S3 API requests. All other AWS services will use a fixed -// expiration time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { - r = r.copy() - return getPresignedURL(r, expire) -} - -// IsPresigned returns true if the request represents a presigned API url. -func (r *Request) IsPresigned() bool { - return r.ExpireTime != 0 -} - -func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { - if expire <= 0 { - return "", nil, awserr.New( - ErrCodeInvalidPresignExpire, - "presigned URL requires an expire duration greater than 0", - nil, - ) - } - - r.ExpireTime = expire - - if r.Operation.BeforePresignFn != nil { - if err := r.Operation.BeforePresignFn(r); err != nil { - return "", nil, err - } - } - - if err := r.Sign(); err != nil { - return "", nil, err - } - - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -const ( - notRetrying = "not retrying" -) - -func debugLogReqError(r *Request, stage, retryStr string, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", notRetrying, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request, returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. 
-func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - - SanitizeHostForHeader(r.HTTPRequest) - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { - if r.streamingBody != nil { - return r.streamingBody, nil - } - - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to get next request body reader", err) - } - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := aws.SeekerLen(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to compute request body size", err) - } - - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker, or did not implement - // Len() method. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request, returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. 
- r.Handlers.Complete.Run(r) - }() - - if err := r.Error; err != nil { - return err - } - - for { - r.Error = nil - r.AttemptTime = time.Now() - - if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", notRetrying, err) - return err - } - - if err := r.sendRequest(); err == nil { - return nil - } - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - } -} - -func (r *Request) prepareRetry() error { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - if err := r.Error; err != nil { - return awserr.New(ErrCodeSerialization, - "failed to prepare body for retry", err) - - } - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - - return nil -} - -func (r *Request) sendRequest() (sendErr error) { - defer r.Handlers.CompleteAttempt.Run(r) - - r.Retryable = nil - r.Handlers.Send.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - if r.URL == nil { - return "" - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. 
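To make the Build/Sign/Send lifecycle above concrete, here is a minimal sketch of driving a generated request by hand instead of calling the convenience method; it assumes an S3 client, and most application code would simply call svc.ListBuckets:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // The generated *Request constructor returns an unsent request plus the
        // output value that Send will unmarshal the response into.
        req, out := svc.ListBucketsRequest(&s3.ListBucketsInput{})

        // Send runs Validate/Build, Sign, the HTTP round trip, unmarshaling,
        // and the retry loop described above.
        if err := req.Send(); err != nil {
            log.Fatal(err)
        }
        fmt.Println("buckets:", len(out.Buckets))
    }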
-// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index e36e468b7c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index de1292f45a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build go1.8 - -package request - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// and body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. 
When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to reset request body", err) - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index a7365cd1e4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 307fa0705b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 64784e16f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,266 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// for p.Next() { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// // ... -// // break out of loop to stop fetching additional pages -// } -// -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. 
- // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - // EndPageOnSameToken, when enabled, will allow the paginator to stop on - // token that are the same as its previous tokens. - EndPageOnSameToken bool - - started bool - prevTokens []interface{} - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - if !p.started { - return true - } - - hasNextPage := len(p.nextTokens) != 0 - if p.EndPageOnSameToken { - return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) - } - return hasNextPage -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. -func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.prevTokens = p.nextTokens - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. 
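The generated *Pages methods are thin wrappers over the Pagination type above; a minimal sketch of driving it directly might look like the following, assuming an *s3.S3 client and the aws, request, s3, and fmt imports used in the earlier sketches, with a placeholder bucket name:

    func printAllKeys(svc *s3.S3, bucket string) error {
        p := request.Pagination{
            NewRequest: func() (*request.Request, error) {
                // Build a fresh request for each page; the generated constructor
                // carries the operation's Paginator token metadata.
                req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                    Bucket: aws.String(bucket),
                })
                return req, nil
            },
        }

        for p.Next() {
            page := p.Page().(*s3.ListObjectsOutput)
            for _, obj := range page.Contents {
                fmt.Println(aws.StringValue(obj.Key))
            }
        }
        return p.Err()
    }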
-func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if !v { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(aws.StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 752ae47f84..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,309 +0,0 @@ -package request - -import ( - "net" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer provides the interface drive the SDK's request retry behavior. The -// Retryer implementation is responsible for implementing exponential backoff, -// and determine if a request API error should be retried. -// -// client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. -type Retryer interface { - // RetryRules return the retry delay that should be used by the SDK before - // making another request attempt for the failed request. - RetryRules(*Request) time.Duration - - // ShouldRetry returns if the failed request is retryable. - // - // Implementations may consider request attempt count when determining if a - // request is retryable, but the SDK will use MaxRetries to limit the - // number of attempts a request are made. - ShouldRetry(*Request) bool - - // MaxRetries is the number of times a request may be retried before - // failing. - MaxRetries() int -} - -// WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. The value must not be nil. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - if retryer == nil { - if cfg.Logger != nil { - cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") - } - retryer = noOpRetryer{} - } - cfg.Retryer = retryer - return cfg - -} - -// noOpRetryer is a internal no op retryer used when a request is created -// without a retryer. -// -// Provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type noOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d noOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d noOpRetryer) ShouldRetry(_ *Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d noOpRetryer) RetryRules(_ *Request) time.Duration { - return 0 -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. 
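A minimal sketch of a custom Retryer satisfying the interface above and being attached via WithRetryer; the attempt limit and fixed delay are arbitrary illustration values, not SDK defaults:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    // fixedRetryer retries retryable or throttled errors up to 3 times with a
    // flat delay between attempts.
    type fixedRetryer struct{}

    func (fixedRetryer) MaxRetries() int { return 3 }

    func (fixedRetryer) ShouldRetry(r *request.Request) bool {
        return r.IsErrorRetryable() || r.IsErrorThrottle()
    }

    func (fixedRetryer) RetryRules(r *request.Request) time.Duration {
        return 200 * time.Millisecond
    }

    func main() {
        cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
        sess := session.Must(session.NewSession(cfg))
        _ = sess // service clients built from sess now use fixedRetryer
    }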
-var retryableCodes = map[string]struct{}{ - ErrCodeRequestError: {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 - "TransactionInProgressException": {}, - "EC2ThrottledException": {}, // EC2 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporary); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. -func IsErrorRetryable(err error) bool { - if err == nil { - return false - } - return shouldRetryError(err) -} - -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - if isNestedErrorRetryable(err) { - return true - } - - origErr := err.OrigErr() - var shouldRetry bool - if origErr != nil { - shouldRetry = shouldRetryError(origErr) - if err.Code() == ErrCodeRequestError && !shouldRetry { - return false - } - } - if isCodeRetryable(err.Code()) { - return true - } - return shouldRetry - - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. - return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. 
- return true - - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeThrottle(aerr.Code()) - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry -// error. Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeExpiredCreds(aerr.Code()) - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - if isErrCode(r.Error, r.RetryErrorCodes) { - return true - } - - // HTTP response status code 501 should not be retried. - // 501 represents Not Implemented which means the request method is not - // supported by the server and cannot be handled. - if r.HTTPResponse != nil { - // HTTP response status code 500 represents internal server error and - // should be retried without any throttle. - if r.HTTPResponse.StatusCode == 500 { - return true - } - } - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its -// code. Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - if isErrCode(r.Error, r.ThrottleErrorCodes) { - return true - } - - if r.HTTPResponse != nil { - switch r.HTTPResponse.StatusCode { - case - 429, // error caused due to too many requests - 502, // Bad Gateway error should be throttled - 503, // caused when service is unavailable - 504: // error occurred due to gateway timeout - return true - } - } - - return IsErrorThrottle(r.Error) -} - -func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - for _, code := range codes { - if code == aerr.Code() { - return true - } - } - } - - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb987..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. 
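Outside the SDK's own retry loop, the IsErrorRetryable, IsErrorThrottle, and IsErrorExpiredCreds helpers above can classify a failed call for application-level handling; a small sketch, assuming the request package import and an error returned by any SDK operation:

    // classify reports how application retry logic might treat an SDK error.
    func classify(err error) string {
        switch {
        case err == nil:
            return "ok"
        case request.IsErrorThrottle(err):
            return "throttled: back off more aggressively"
        case request.IsErrorExpiredCreds(err):
            return "credentials expired: refresh and re-sign"
        case request.IsErrorRetryable(err):
            return "transient: safe to retry"
        default:
            return "permanent: do not retry"
        }
    }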
-type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. -// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. - r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 8630683f31..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,286 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" - - // ParamFormatErrCode is the error code for a field with invalid - // format or characters. - ParamFormatErrCode = "ParamFormatInvalidError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. 
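A sketch of applying the per-read response timeout option above to a single call, assuming an *s3.S3 client and the aws, request, s3, io, and time imports from the earlier sketches; the object names and the 30-second limit are placeholders:

    func fetchWithReadTimeout(ctx aws.Context, svc *s3.S3, bucket, key string) error {
        out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(key),
        }, request.WithResponseReadTimeout(30*time.Second))
        if err != nil {
            // A stalled body read surfaces as ErrCodeResponseTimeout.
            return err
        }
        defer out.Body.Close()
        _, err = io.Copy(io.Discard, out.Body)
        return err
    }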
-type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." 
- } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} - -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. -func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required minimum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - -// An ErrParamFormat represents a invalid format parameter error. -type ErrParamFormat struct { - errInvalidParam - format string -} - -// NewErrParamFormat creates a new invalid format parameter error. -func NewErrParamFormat(field string, format, value string) *ErrParamFormat { - return &ErrParamFormat{ - errInvalidParam: errInvalidParam{ - code: ParamFormatErrCode, - field: field, - msg: fmt.Sprintf("format %v, %v", format, value), - }, - format: format, - } -} - -// Format returns the field's required format. 
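The validation error types above are what generated input types return from their Validate method; a hand-written sketch of the same pattern follows, where the ExampleInput type and its constraints are invented purely for illustration:

    // ExampleInput mimics a generated API input shape.
    type ExampleInput struct {
        Name *string
        Tags []*string
    }

    // Validate follows the same pattern as generated SDK input validation.
    func (i *ExampleInput) Validate() error {
        invalidParams := request.ErrInvalidParams{Context: "ExampleInput"}
        if i.Name == nil {
            invalidParams.Add(request.NewErrParamRequired("Name"))
        }
        if i.Name != nil && len(*i.Name) < 3 {
            invalidParams.Add(request.NewErrParamMinLen("Name", 3))
        }
        if len(i.Tags) > 10 {
            invalidParams.Add(request.NewErrParamMaxLen("Tags", 10, "ExampleInput.Tags"))
        }
        if invalidParams.Len() > 0 {
            return invalidParams
        }
        return nil
    }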
-func (e *ErrParamFormat) Format() string { - return e.format -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 4601f883cc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,295 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. -func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(aws.Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. 
-func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. -// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. 
- if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else { - sleepCtxFn := w.SleepWithContext - if sleepCtxFn == nil { - sleepCtxFn = aws.SleepWithContext - } - - if err := sleepCtxFn(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go deleted file mode 100644 index ea9ebb6f6a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build go1.7 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. 
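Callers normally reach the Waiter machinery above through the generated WaitUntil* methods rather than using Waiter directly; a sketch with the waiter options shown earlier, assuming an *s3.S3 client, with a placeholder bucket name, attempt limit, and delay:

    func waitForBucket(ctx aws.Context, svc *s3.S3, bucket string) error {
        return svc.WaitUntilBucketExistsWithContext(ctx,
            &s3.HeadBucketInput{Bucket: aws.String(bucket)},
            request.WithWaiterMaxAttempts(10),
            request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
        )
    }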
-func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go deleted file mode 100644 index fec39dfc12..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.6,go1.5 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go deleted file mode 100644 index 1c5a5391e6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.7,go1.6 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go deleted file mode 100644 index fe6dac1f47..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ /dev/null @@ -1,267 +0,0 @@ -package session - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -func resolveCredentials(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (*credentials.Credentials, error) { - - switch { - case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration - // so load that profile from shared config first. - // Github(aws/aws-sdk-go#2727) - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - - case envCfg.Creds.HasKeys(): - // Environment credentials - return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil - - case len(envCfg.WebIdentityTokenFilePath) != 0: - // Web identity token from environment, RoleARN required to also be - // set. 
- return assumeWebIdentity(cfg, handlers, - envCfg.WebIdentityTokenFilePath, - envCfg.RoleARN, - envCfg.RoleSessionName, - ) - - default: - // Fallback to the "default" credential resolution chain. - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - } -} - -// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_ROLE_ARN' was not set. -var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) - -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but -// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. -var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) - -func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, - filepath string, - roleARN, sessionName string, -) (*credentials.Credentials, error) { - - if len(filepath) == 0 { - return nil, WebIdentityEmptyTokenFilePathErr - } - - if len(roleARN) == 0 { - return nil, WebIdentityEmptyRoleARNErr - } - - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) - - return creds, nil -} - -func resolveCredsFromProfile(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch { - case sharedCfg.SourceProfile != nil: - // Assume IAM role with credentials source from a different profile. - creds, err = resolveCredsFromProfile(cfg, envCfg, - *sharedCfg.SourceProfile, handlers, sessOpts, - ) - - case sharedCfg.Creds.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - creds = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - - case len(sharedCfg.CredentialProcess) != 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - - case len(sharedCfg.CredentialSource) != 0: - creds, err = resolveCredsFromSource(cfg, envCfg, - sharedCfg, handlers, sessOpts, - ) - - case len(sharedCfg.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - return assumeWebIdentity(cfg, handlers, - sharedCfg.WebIdentityTokenFile, - sharedCfg.RoleARN, - sharedCfg.RoleSessionName, - ) - - default: - // Fallback to default credentials provider, include mock errors for - // the credential chain so user can identify why credentials failed to - // be retrieved. 
- creds = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{ - Err: awserr.New("EnvAccessKeyNotFound", - "failed to find credentials in the environment.", nil), - }, - &credProviderError{ - Err: awserr.New("SharedCredsLoad", - fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), - }, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - if err != nil { - return nil, err - } - - if len(sharedCfg.RoleARN) > 0 { - cfgCp := *cfg - cfgCp.Credentials = creds - return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) - } - - return creds, nil -} - -// valid credential source values -const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" -) - -func resolveCredsFromSource(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - case credSourceEnvironment: - creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) - - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return nil, ErrSharedConfigECSContainerEnvVarEmpty - } - - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - default: - return nil, ErrSharedConfigInvalidCredSource - } - - return creds, nil -} - -func credsFromAssumeRole(cfg aws.Config, - handlers request.Handlers, - sharedCfg sharedConfig, - sessOpts Options, -) (*credentials.Credentials, error) { - - if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return nil, AssumeRoleTokenProviderNotSetError{} - } - - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.RoleSessionName - - if sessOpts.AssumeRoleDuration == 0 && - sharedCfg.AssumeRoleDuration != nil && - *sharedCfg.AssumeRoleDuration/time.Minute > 15 { - opt.Duration = *sharedCfg.AssumeRoleDuration - } else if sessOpts.AssumeRoleDuration != 0 { - opt.Duration = sessOpts.AssumeRoleDuration - } - - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ), nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. 
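The web-identity branch above can also be exercised explicitly with stscreds when the environment variables are not set; a sketch, assuming the aws, session, and credentials/stscreds imports, where the role ARN, session name, and token path are placeholders:

    func webIdentityConfig(sess *session.Session) *aws.Config {
        creds := stscreds.NewWebIdentityCredentials(sess,
            "arn:aws:iam::123456789012:role/example-role", // placeholder ARN
            "example-session",                             // placeholder session name
            "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", // placeholder token path
        )
        return aws.NewConfig().WithCredentials(creds)
    }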
-func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go deleted file mode 100644 index 7ec66e7e58..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. - -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. - -Sessions options from Shared Config - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. - -Credential and config loading order - -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: - - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) - -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). - - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) - -Creating Sessions - -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. - - // Create Session - sess, err := session.NewSession() - - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. - - // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-west-2"), - }) - -Use NewSessionWithOptions to provide additional configuration driving how the -Session's configuration will be loaded. Such as, specifying shared config -profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). 
- - // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{ - // Options - }) - - sess, err := session.NewSessionWithOptions(session.Options{ - // Specify profile to load for the session's config - Profile: "profile_name", - - // Provide SDK Config options, such as Region. - Config: aws.Config{ - Region: aws.String("us-west-2"), - }, - - // Force enable Shared Config support - SharedConfigState: session.SharedConfigEnable, - }) - -Adding Handlers - -You can add handlers to a session to decorate API operation, (e.g. adding HTTP -headers). All clients that use the Session receive a copy of the Session's -handlers. For example, the following request handler added to the Session logs -every requests made. - - // Create a session, and add additional handlers for all service - // clients created with the Session to inherit. Adds logging handler. - sess := session.Must(session.NewSession()) - - sess.Handlers.Send.PushFront(func(r *request.Request) { - // Log every request made and its payload - logger.Printf("Request: %s/%s, Params: %s", - r.ClientInfo.ServiceName, r.Operation, r.Params) - }) - -Shared Config Fields - -By default the SDK will only load the shared credentials file's -(~/.aws/credentials) credentials values, and all other config is provided by -the environment variables, SDK defaults, and user provided aws.Config values. - -If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable -option is used to create the Session the full shared config values will be -loaded. This includes credentials, region, and support for assume role. In -addition the Session will load its configuration from both the shared config -file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both -files have the same format. - -If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file -(~/.aws/config). - -Credentials are the values the SDK uses to authenticating requests with AWS -Services. When specified in a file, both aws_access_key_id and -aws_secret_access_key must be provided together in the same file to be -considered valid. They will be ignored if both are not present. -aws_session_token is an optional field that can be provided in addition to the -other two fields. - - aws_access_key_id = AKID - aws_secret_access_key = SECRET - aws_session_token = TOKEN - - ; region only supported if SharedConfigEnabled. - region = us-east-1 - -Assume Role configuration - -The role_arn field allows you to configure the SDK to assume an IAM role using -a set of credentials from another source. Such as when paired with static -credentials, "profile_source", "credential_process", or "credential_source" -fields. If "role_arn" is provided, a source of credentials must also be -specified, such as "source_profile", "credential_source", or -"credential_process". - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - - -The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you -must also set the Session Option.AssumeRoleTokenProvider. The Session will fail -to load if the AssumeRoleTokenProvider is not specified. 
- - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - -To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider -documentation. - -Environment Variables - -When a Session is created several environment variables can be set to adjust -how the SDK functions, and what configuration data it loads when creating -Sessions. All environment values are optional, but some values like credentials -require multiple of the values to set or the partial values will be ignored. -All environment variable values are strings unless otherwise noted. - -Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session Token and optionally also be provided, but is -not required. - - # Access Key ID - AWS_ACCESS_KEY_ID=AKID - AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - - # Secret Access Key - AWS_SECRET_ACCESS_KEY=SECRET - AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - - # Session Token - AWS_SESSION_TOKEN=TOKEN - -Region value will instruct the SDK where to make service API requests to. If is -not provided in the environment the region must be provided before a service -client request is made. - - AWS_REGION=us-east-1 - - # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_REGION is not also set. - AWS_DEFAULT_REGION=us-east-1 - -Profile name the SDK should load use when loading shared config from the -configuration files. If not provided "default" will be used as the profile name. - - AWS_PROFILE=my_profile - - # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_PROFILE is not also set. - AWS_DEFAULT_PROFILE=my_profile - -SDK load config instructs the SDK to load the shared config in addition to -shared credentials. This also expands the configuration loaded so the shared -credentials will have parity with the shared config file. This also enables -Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE -env values as well. - - AWS_SDK_LOAD_CONFIG=1 - -Shared credentials file path can be set to instruct the SDK to use an alternative -file for the shared credentials. If not set the file will be loaded from -$HOME/.aws/credentials on Linux/Unix based systems, and -%USERPROFILE%\.aws\credentials on Windows. - - AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - -Shared config file path can be set to instruct the SDK to use an alternative -file for the shared config. If not set the file will be loaded from -$HOME/.aws/config on Linux/Unix based systems, and -%USERPROFILE%\.aws\config on Windows. - - AWS_CONFIG_FILE=$HOME/my_shared_config - -Path to a custom Credentials Authority (CA) bundle PEM file that the SDK -will use instead of the default system's root CA bundle. Use this only -if you want to replace the CA bundle the SDK uses for TLS requests. - - AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - -Enabling this option will attempt to merge the Transport into the SDK's HTTP -client. If the client's Transport is not a http.Transport an error will be -returned. If the Transport's TLS config is set this option will cause the SDK -to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file -contains multiple certificates all of them will be loaded. - -The Session option CustomCABundle is also available when creating sessions -to also enable this feature. 
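A rough sketch of the CustomCABundle session option in code, assuming a PEM bundle at a placeholder path:

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // The bundle is passed to the session as an io.Reader.
        f, err := os.Open("/path/to/example-ca-bundle.pem") // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        sess, err := session.NewSessionWithOptions(session.Options{
            CustomCABundle: f,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }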
CustomCABundle session option field has priority -over the AWS_CA_BUNDLE environment variable, and will be used if both are set. - -Setting a custom HTTPClient in the aws.Config options will override this setting. -To use this option and custom HTTP client, the HTTP client needs to be provided -when creating the session. Not the service client. -*/ -package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go deleted file mode 100644 index c1e0e9c954..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ /dev/null @@ -1,345 +0,0 @@ -package session - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// EnvProviderName provides a name of the provider when config is loaded from environment. -const EnvProviderName = "EnvConfigCredentials" - -// envConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type envConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Creds credentials.Value - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-east-1 - // - // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_REGION is not also set. - // AWS_DEFAULT_REGION=us-east-1 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // - // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_PROFILE is not also set. - // AWS_DEFAULT_PROFILE=my_profile - Profile string - - // SDK load config instructs the SDK to load the shared config in addition to - // shared credentials. This also expands the configuration loaded from the shared - // credentials to have parity with the shared config file. This also enables - // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE - // env values as well. - // - // AWS_SDK_LOAD_CONFIG=1 - EnableSharedConfig bool - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. 
If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the session. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - csmEnabled string - CSMEnabled *bool - CSMPort string - CSMHost string - CSMClientID string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery *bool - enableEndpointDiscovery string - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. - // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint - // for a service. - // - // AWS_STS_REGIONAL_ENDPOINTS=regional - // This can take value as `regional` or `legacy` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the S3 Regional Endpoint flag for the SDK to resolve the - // endpoint for a service. - // - // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional - // This can take value as `regional` or `legacy` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. 
- // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion bool -} - -var ( - csmEnabledEnvKey = []string{ - "AWS_CSM_ENABLED", - } - csmHostEnvKey = []string{ - "AWS_CSM_HOST", - } - csmPortEnvKey = []string{ - "AWS_CSM_PORT", - } - csmClientIDEnvKey = []string{ - "AWS_CSM_CLIENT_ID", - } - credAccessEnvKey = []string{ - "AWS_ACCESS_KEY_ID", - "AWS_ACCESS_KEY", - } - credSecretEnvKey = []string{ - "AWS_SECRET_ACCESS_KEY", - "AWS_SECRET_KEY", - } - credSessionEnvKey = []string{ - "AWS_SESSION_TOKEN", - } - - enableEndpointDiscoveryEnvKey = []string{ - "AWS_ENABLE_ENDPOINT_DISCOVERY", - } - - regionEnvKeys = []string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - profileEnvKeys = []string{ - "AWS_PROFILE", - "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - sharedCredsFileEnvKey = []string{ - "AWS_SHARED_CREDENTIALS_FILE", - } - sharedConfigFileEnvKey = []string{ - "AWS_CONFIG_FILE", - } - webIdentityTokenFilePathEnvKey = []string{ - "AWS_WEB_IDENTITY_TOKEN_FILE", - } - roleARNEnvKey = []string{ - "AWS_ROLE_ARN", - } - roleSessionNameEnvKey = []string{ - "AWS_ROLE_SESSION_NAME", - } - stsRegionalEndpointKey = []string{ - "AWS_STS_REGIONAL_ENDPOINTS", - } - s3UsEast1RegionalEndpoint = []string{ - "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", - } - s3UseARNRegionEnvKey = []string{ - "AWS_S3_USE_ARN_REGION", - } -) - -// loadEnvConfig retrieves the SDK's environment configuration. -// See `envConfig` for the values that will be retrieved. -// -// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value -// the shared SDK config will be loaded in addition to the SDK's specific -// configuration values. -func loadEnvConfig() (envConfig, error) { - enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) - return envConfigLoad(enableSharedConfig) -} - -// loadEnvSharedConfig retrieves the SDK's environment configuration, and the -// SDK shared config. See `envConfig` for the values that will be retrieved. -// -// Loads the shared configuration in addition to the SDK's specific configuration. -// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` -// environment variable is set. 
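These loaders are internal, but their effect is visible through the public session API. A small sketch, assuming no conflicting region configuration elsewhere, of the environment region flowing into the resulting session:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // AWS_REGION is always read; AWS_DEFAULT_REGION is only considered
        // when AWS_SDK_LOAD_CONFIG is also set, per the key lists above.
        os.Setenv("AWS_REGION", "us-west-2")

        sess, err := session.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(sess.Config.Region)) // us-west-2
    }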
-func loadSharedEnvConfig() (envConfig, error) { - return envConfigLoad(true) -} - -func envConfigLoad(enableSharedConfig bool) (envConfig, error) { - cfg := envConfig{} - - cfg.EnableSharedConfig = enableSharedConfig - - // Static environment credentials - var creds credentials.Value - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&creds.SessionToken, credSessionEnvKey) - if creds.HasKeys() { - // Require logical grouping of credentials - creds.ProviderName = EnvProviderName - cfg.Creds = creds - } - - // Role Metadata - setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) - setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) - - // Web identity environment variables - setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) - - // CSM environment variables - setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) - setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) - setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - - if len(cfg.csmEnabled) != 0 { - v, _ := strconv.ParseBool(cfg.csmEnabled) - cfg.CSMEnabled = &v - } - - regionKeys := regionEnvKeys - profileKeys := profileEnvKeys - if !cfg.EnableSharedConfig { - regionKeys = regionKeys[:1] - profileKeys = profileKeys[:1] - } - - setFromEnvVal(&cfg.Region, regionKeys) - setFromEnvVal(&cfg.Profile, profileKeys) - - // endpoint discovery is in reference to it being enabled. - setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) - if len(cfg.enableEndpointDiscovery) > 0 { - cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") - } - - setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) - setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) - - if len(cfg.SharedCredentialsFile) == 0 { - cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(cfg.SharedConfigFile) == 0 { - cfg.SharedConfigFile = defaults.SharedConfigFilename() - } - - cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - - var err error - // STS Regional Endpoint variable - for _, k := range stsRegionalEndpointKey { - if v := os.Getenv(k); len(v) != 0 { - cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - // S3 Regional Endpoint variable - for _, k := range s3UsEast1RegionalEndpoint { - if v := os.Getenv(k); len(v) != 0 { - cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - var s3UseARNRegion string - setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) - if len(s3UseARNRegion) != 0 { - switch { - case strings.EqualFold(s3UseARNRegion, "false"): - cfg.S3UseARNRegion = false - case strings.EqualFold(s3UseARNRegion, "true"): - cfg.S3UseARNRegion = true - default: - return envConfig{}, fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - s3UseARNRegionEnvKey[0], s3UseARNRegion) - } - } - - return cfg, nil -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) != 0 { - *dst = v - break - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 0ff4996051..0000000000 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,734 +0,0 @@ -package session - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/csm" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ErrCodeSharedConfig represents an error that occurs in the shared - // configuration logic - ErrCodeSharedConfig = "SharedConfigErr" -) - -// ErrSharedConfigSourceCollision will be returned if a section contains both -// source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) - -// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment -// variables are empty and Environment was set as the credential source -var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) - -// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided -var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the Session concurrently. -// -// The Session satisfies the service client's client.ConfigProvider. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided configs -// on top of the SDK's default configurations. Once the Session is created it -// can be mutated to modify the Config or Handlers. The Session is safe to be -// read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New -// method could now encounter an error when loading the configuration. When -// The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occurred while -// loading the session. Use NewSession to get the error when creating the -// session. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. -// -// Deprecated: Use NewSession functions to create sessions instead. NewSession -// has the same functionality as New except an error can be returned when the -// func is called instead of waiting to receive an error until a request is made. -func New(cfgs ...*aws.Config) *Session { - // load initial config from environment - envCfg, envErr := loadEnvConfig() - - if envCfg.EnableSharedConfig { - var cfg aws.Config - cfg.MergeIn(cfgs...) 
- s, err := NewSessionWithOptions(Options{ - Config: cfg, - SharedConfigState: SharedConfigEnable, - }) - if err != nil { - // Old session.New expected all errors to be discovered when - // a request is made, and would report the errors then. This - // needs to be replicated if an error occurs while creating - // the session. - msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occurring during session creation." - - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s = &Session{Config: defaults.Config()} - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - - return s - } - - s := deprecatedNewSession(cfgs...) - if envErr != nil { - msg := "failed to load env config" - s.logDeprecatedNewSessionError(msg, envErr, cfgs) - } - - if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - msg := "failed to enable CSM" - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - } - - return s -} - -// NewSession returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. Once the Session is created -// it can be mutated to modify the Config or Handlers. The Session is safe to -// be read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created, such as specifying the -// config profile, and controlling if shared config is enabled or not. -func NewSession(cfgs ...*aws.Config) (*Session, error) { - opts := Options{} - opts.Config.MergeIn(cfgs...) - - return NewSessionWithOptions(opts) -} - -// SharedConfigState provides the ability to optionally override the state -// of the session's creation based on the shared config being enabled or -// disabled. -type SharedConfigState int - -const ( - // SharedConfigStateFromEnv does not override any state of the - // AWS_SDK_LOAD_CONFIG env var. It is the default value of the - // SharedConfigState type. - SharedConfigStateFromEnv SharedConfigState = iota - - // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value - // and disables the shared config functionality. - SharedConfigDisable - - // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value - // and enables the shared config functionality. - SharedConfigEnable -) - -// Options provides the means to control how a Session is created and what -// configuration values will be loaded. -// -type Options struct { - // Provides config values for the SDK to use when creating service clients - // and making API requests to services. Any value set in with this field - // will override the associated value provided by the SDK defaults, - // environment or config files where relevant. 
- // - // If not set, configuration values from from SDK defaults, environment, - // config will be used. - Config aws.Config - - // Overrides the config profile the Session should be created from. If not - // set the value of the environment variable will be loaded (AWS_PROFILE, - // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). - // - // If not set and environment variables are not set the "default" - // (DefaultSharedConfigProfile) will be used as the profile to load the - // session config from. - Profile string - - // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG - // environment variable. By default a Session will be created using the - // value provided by the AWS_SDK_LOAD_CONFIG environment variable. - // - // Setting this value to SharedConfigEnable or SharedConfigDisable - // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable - // and enable or disable the shared config functionality. - SharedConfigState SharedConfigState - - // Ordered list of files the session will load configuration from. - // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. - SharedConfigFiles []string - - // When the SDK's shared config is configured to assume a role with MFA - // this option is required in order to provide the mechanism that will - // retrieve the MFA token. There is no default value for this field. If - // it is not set an error will be returned when creating the session. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed. Within the context of service clients - // all sharing the same session the SDK will ensure calls to the token - // provider are atomic. When sharing a token provider across multiple - // sessions additional synchronization logic is needed to ensure the - // token providers do not introduce race conditions. It is recommend to - // share the session where possible. - // - // stscreds.StdinTokenProvider is a basic implementation that will prompt - // from stdin for the MFA token code. - // - // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. - AssumeRoleTokenProvider func() (string, error) - - // When the SDK's shared config is configured to assume a role this option - // may be provided to set the expiry duration of the STS credentials. - // Defaults to 15 minutes if not set as documented in the - // stscreds.AssumeRoleProvider. - AssumeRoleDuration time.Duration - - // Reader for a custom Credentials Authority (CA) bundle in PEM format that - // the SDK will use instead of the default system's root CA bundle. Use this - // only if you want to replace the CA bundle the SDK uses for TLS requests. - // - // Enabling this option will attempt to merge the Transport into the SDK's HTTP - // client. If the client's Transport is not a http.Transport an error will be - // returned. If the Transport's TLS config is set this option will cause the SDK - // to overwrite the Transport's TLS config's RootCAs value. If the CA - // bundle reader contains multiple certificates all of them will be loaded. - // - // The Session option CustomCABundle is also available when creating sessions - // to also enable this feature. CustomCABundle session option field has priority - // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. 
- CustomCABundle io.Reader - - // The handlers that the session and all API clients will be created with. - // This must be a complete set of handlers. Use the defaults.Handlers() - // function to initialize this value before changing the handlers to be - // used by the SDK. - Handlers request.Handlers -} - -// NewSessionWithOptions returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. This func uses the Options -// values to configure how the Session is created. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) -// -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) -// -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) -// -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) -func NewSessionWithOptions(opts Options) (*Session, error) { - var envCfg envConfig - var err error - if opts.SharedConfigState == SharedConfigEnable { - envCfg, err = loadSharedEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load shared config, %v", err) - } - } else { - envCfg, err = loadEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load environment config, %v", err) - } - } - - if len(opts.Profile) != 0 { - envCfg.Profile = opts.Profile - } - - switch opts.SharedConfigState { - case SharedConfigDisable: - envCfg.EnableSharedConfig = false - case SharedConfigEnable: - envCfg.EnableSharedConfig = true - } - - // Only use AWS_CA_BUNDLE if session option is not provided. - if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { - f, err := os.Open(envCfg.CustomCABundle) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to open custom CA bundle PEM file", err) - } - defer f.Close() - opts.CustomCABundle = f - } - - return newSession(opts, envCfg, &opts.Config) -} - -// Must is a helper function to ensure the Session is valid and there was no -// error when calling a NewSession function. -// -// This helper is intended to be used in variable initialization to load the -// Session and configuration at startup. Such as: -// -// var sess = session.Must(session.NewSession()) -func Must(sess *Session, err error) *Session { - if err != nil { - panic(err) - } - - return sess -} - -func deprecatedNewSession(cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) 
- if cfg.EndpointResolver == nil { - // An endpoint resolver is required for a session to be able to provide - // endpoints for service client configurations. - cfg.EndpointResolver = endpoints.DefaultResolver() - } - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - return s -} - -func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { - if logger != nil { - logger.Log("Enabling CSM") - } - - r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) - if err != nil { - return err - } - r.InjectHandlers(handlers) - - return nil -} - -func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { - cfg := defaults.Config() - - handlers := opts.Handlers - if handlers.IsEmpty() { - handlers = defaults.Handlers() - } - - // Get a merged version of the user provided config to determine if - // credentials were. - userCfg := &aws.Config{} - userCfg.MergeIn(cfgs...) - cfg.MergeIn(userCfg) - - // Ordered config files will be loaded in with later files overwriting - // previous config file values. - var cfgFiles []string - if opts.SharedConfigFiles != nil { - cfgFiles = opts.SharedConfigFiles - } else { - cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] - } - } - - // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) - if err != nil { - if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { - // Special case where the user has not explicitly specified an AWS_PROFILE, - // or session.Options.profile, shared config is not enabled, and the - // environment has credentials, allow the shared config file to fail to - // load since the user has already provided credentials, and nothing else - // is required to be read file. 
Github(aws/aws-sdk-go#2455) - } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return nil, err - } - } - - if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { - return nil, err - } - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - return nil, err - } - } - - // Setup HTTP client with custom cert bundle if enabled - if opts.CustomCABundle != nil { - if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { - return nil, err - } - } - - return s, nil -} - -type csmConfig struct { - Enabled bool - Host string - Port string - ClientID string -} - -var csmProfileName = "aws_csm" - -func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { - if envCfg.CSMEnabled != nil { - if *envCfg.CSMEnabled { - return csmConfig{ - Enabled: true, - ClientID: envCfg.CSMClientID, - Host: envCfg.CSMHost, - Port: envCfg.CSMPort, - }, nil - } - return csmConfig{}, nil - } - - sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return csmConfig{}, err - } - } - if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { - return csmConfig{ - Enabled: true, - ClientID: sharedCfg.CSMClientID, - Host: sharedCfg.CSMHost, - Port: sharedCfg.CSMPort, - }, nil - } - - return csmConfig{}, nil -} - -func loadCustomCABundle(s *Session, bundle io.Reader) error { - var t *http.Transport - switch v := s.Config.HTTPClient.Transport.(type) { - case *http.Transport: - t = v - default: - if s.Config.HTTPClient.Transport != nil { - return awserr.New("LoadCustomCABundleError", - "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) - } - } - if t == nil { - // Nil transport implies `http.DefaultTransport` should be used. Since - // the SDK cannot modify, nor copy the `DefaultTransport` specifying - // the values the next closest behavior. 
- t = getCABundleTransport() - } - - p, err := loadCertPool(bundle) - if err != nil { - return err - } - if t.TLSClientConfig == nil { - t.TLSClientConfig = &tls.Config{} - } - t.TLSClientConfig.RootCAs = p - - s.Config.HTTPClient.Transport = t - - return nil -} - -func loadCertPool(r io.Reader) (*x509.CertPool, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to read custom CA bundle PEM file", err) - } - - p := x509.NewCertPool() - if !p.AppendCertsFromPEM(b) { - return nil, awserr.New("LoadCustomCABundleError", - "failed to load custom CA bundle PEM file", err) - } - - return p, nil -} - -func mergeConfigSrcs(cfg, userCfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) error { - - // Region if not already set by user - if len(aws.StringValue(cfg.Region)) == 0 { - if len(envCfg.Region) > 0 { - cfg.WithRegion(envCfg.Region) - } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { - cfg.WithRegion(sharedCfg.Region) - } - } - - if cfg.EnableEndpointDiscovery == nil { - if envCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) - } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) - } - } - - // Regional Endpoint flag for STS endpoint resolving - mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ - userCfg.STSRegionalEndpoint, - envCfg.STSRegionalEndpoint, - sharedCfg.STSRegionalEndpoint, - endpoints.LegacySTSEndpoint, - }) - - // Regional Endpoint flag for S3 endpoint resolving - mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ - userCfg.S3UsEast1RegionalEndpoint, - envCfg.S3UsEast1RegionalEndpoint, - sharedCfg.S3UsEast1RegionalEndpoint, - endpoints.LegacyS3UsEast1Endpoint, - }) - - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - - cfg.S3UseARNRegion = userCfg.S3UseARNRegion - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &envCfg.S3UseARNRegion - } - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion - } - - return nil -} - -func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = v - break - } - } -} - -func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetS3UsEast1Endpoint { - cfg.S3UsEast1RegionalEndpoint = v - break - } - } -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current Session, copying the config -// and handlers. If any additional configs are provided they will be merged -// on top of the Session's copied config. -// -// // Create a copy of the current Session, configured for the us-west-2 region. 
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) - - region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, s.Config) - if err != nil { - s.Handlers.Validate.PushBack(func(r *request.Request) { - if len(r.ClientInfo.Endpoint) != 0 { - // Error occurred while resolving endpoint, but the request - // being invoked has had an endpoint specified after the client - // was created. - return - } - r.Error = err - }) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - PartitionID: resolved.PartitionID, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - } -} - -func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { - - if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), - SigningRegion: region, - }, nil - } - - resolved, err := cfg.EndpointResolver.EndpointFor(service, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) - opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) - // Support for STSRegionalEndpoint where the STSRegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint - - // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) - if err != nil { - return endpoints.ResolvedEndpoint{}, err - } - - return resolved, nil -} - -// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception -// that the EndpointResolver will not be used to resolve the endpoint. The only -// endpoint set must come from the aws.Config.Endpoint field. -func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
- - var resolved endpoints.ResolvedEndpoint - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { - resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = aws.StringValue(s.Config.Region) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - } -} - -// logDeprecatedNewSessionError function enables error handling for session -func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go deleted file mode 100644 index 680805a38a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ /dev/null @@ -1,555 +0,0 @@ -package session - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/internal/ini" -) - -const ( - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - roleDurationSecondsKey = "duration_seconds" // optional - - // CSM options - csmEnabledKey = `csm_enabled` - csmHostKey = `csm_host` - csmPortKey = `csm_port` - csmClientIDKey = `csm_client_id` - - // Additional Config fields - regionKey = `region` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential Process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // Additional config fields for regional or legacy endpoints - stsRegionalEndpointSharedKey = `sts_regional_endpoints` - - // Additional config fields for regional or legacy endpoints - s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" -) - -// sharedConfig represents the configuration fields of the SDK config files. -type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id and - // aws_secret_access_key must be provided together in the same file to be - // considered valid. The values will be ignored if not a complete group. 
- // aws_session_token is an optional field that can be provided if both of - // the other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Creds credentials.Value - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string - AssumeRoleDuration *time.Duration - - SourceProfileName string - SourceProfile *sharedConfig - - // Region is the region the SDK should use for looking up AWS service - // endpoints and signing requests. - // - // region - Region string - - // EnableEndpointDiscovery can be enabled in the shared config by setting - // endpoint_discovery_enabled to true - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery *bool - - // CSM Options - CSMEnabled *bool - CSMHost string - CSMPort string - CSMClientID string - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // sts_regional_endpoints = regional - // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // s3_us_east_1_regional_endpoint = regional - // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // s3_use_arn_region=true - S3UseARNRegion bool -} - -type sharedConfigFile struct { - Filename string - IniData ini.Sections -} - -// loadSharedConfig retrieves the configuration from the list of files using -// the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of -// A's. -// -// See sharedConfig.setFromFile for information how the config files -// will be loaded. -func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { - if len(profile) == 0 { - profile = DefaultSharedConfigProfile - } - - files, err := loadSharedConfigIniFiles(filenames) - if err != nil { - return sharedConfig{}, err - } - - cfg := sharedConfig{} - profiles := map[string]struct{}{} - if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { - return sharedConfig{}, err - } - - return cfg, nil -} - -func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { - files := make([]sharedConfigFile, 0, len(filenames)) - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { - // Skip files which can't be opened and read for whatever reason - continue - } else if err != nil { - return nil, SharedConfigLoadError{Filename: filename, Err: err} - } - - files = append(files, sharedConfigFile{ - Filename: filename, IniData: sections, - }) - } - - return files, nil -} - -func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { - // Trim files from the list that don't exist. 
- var skippedFiles int - var profileNotFoundErr error - for _, f := range files { - if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore profiles not defined in individual files. - profileNotFoundErr = err - skippedFiles++ - continue - } - return err - } - } - if skippedFiles == len(files) { - // If all files were skipped because the profile is not found, return - // the original profile not found error. - return profileNotFoundErr - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. - cfg.clearAssumeRoleOptions() - } else { - // First time a profile has been seen, It must either be a assume role - // or credentials. Assert if the credential type requires a role ARN, - // the ARN is also set. - if err := cfg.validateCredentialsRequireARN(profile); err != nil { - return err - } - } - profiles[profile] = struct{}{} - - if err := cfg.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(cfg.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - cfg.clearCredentialOptions() - - srcCfg := &sharedConfig{} - err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) - if err != nil { - // SourceProfile that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - - cfg.SourceProfile = srcCfg - } - - return nil -} - -// setFromFile loads the configuration from the file using the profile -// provided. A sharedConfig pointer type value is used so that multiple config -// file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For -// example if a config file only includes aws_access_key_id but no -// aws_secret_access_key the aws_access_key_id will be ignored. 
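As a concrete illustration of that grouping rule, in the credentials-file format already shown in the package documentation (key values are placeholders): the first profile below yields usable static credentials, while the second is skipped because the secret key is missing.

    [complete]
    aws_access_key_id     = AKID
    aws_secret_access_key = SECRET
    aws_session_token     = TOKEN

    [incomplete]
    # aws_secret_access_key is absent, so aws_access_key_id is ignored
    aws_access_key_id = AKID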
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { - section, ok := file.IniData.GetSection(profile) - if !ok { - // Fallback to to alternate profile name: profile - section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if !ok { - return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} - } - } - - if exOpts { - // Assume Role Parameters - updateString(&cfg.RoleARN, section, roleArnKey) - updateString(&cfg.ExternalID, section, externalIDKey) - updateString(&cfg.MFASerial, section, mfaSerialKey) - updateString(&cfg.RoleSessionName, section, roleSessionNameKey) - updateString(&cfg.SourceProfileName, section, sourceProfileKey) - updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) - - if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second - cfg.AssumeRoleDuration = &d - } - - if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { - sre, err := endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - stsRegionalEndpointSharedKey, file.Filename, err) - } - cfg.STSRegionalEndpoint = sre - } - - if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { - sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - s3UsEast1RegionalSharedKey, file.Filename, err) - } - cfg.S3UsEast1RegionalEndpoint = sre - } - } - - updateString(&cfg.CredentialProcess, section, credentialProcessKey) - updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - // Shared Credentials - creds := credentials.Value{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - if creds.HasKeys() { - cfg.Creds = creds - } - - // Endpoint discovery - updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - - // CSM options - updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) - updateString(&cfg.CSMHost, section, csmHostKey) - updateString(&cfg.CSMPort, section, csmPortKey) - updateString(&cfg.CSMClientID, section, csmClientIDKey) - - updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) - - return nil -} - -func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(cfg.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(cfg.CredentialSource) != 0: - credSource = credentialSourceKey - case len(cfg.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(cfg.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. 
- if !oneOrNone( - len(cfg.SourceProfileName) != 0, - len(cfg.CredentialSource) != 0, - len(cfg.CredentialProcess) != 0, - len(cfg.WebIdentityTokenFile) != 0, - ) { - return ErrSharedConfigSourceCollision - } - - return nil -} - -func (cfg *sharedConfig) hasCredentials() bool { - switch { - case len(cfg.SourceProfileName) != 0: - case len(cfg.CredentialSource) != 0: - case len(cfg.CredentialProcess) != 0: - case len(cfg.WebIdentityTokenFile) != 0: - case cfg.Creds.HasKeys(): - default: - return false - } - - return true -} - -func (cfg *sharedConfig) clearCredentialOptions() { - cfg.CredentialSource = "" - cfg.CredentialProcess = "" - cfg.WebIdentityTokenFile = "" - cfg.Creds = credentials.Value{} -} - -func (cfg *sharedConfig) clearAssumeRoleOptions() { - cfg.RoleARN = "" - cfg.ExternalID = "" - cfg.MFASerial = "" - cfg.RoleSessionName = "" - cfg.SourceProfileName = "" -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.Bool(key) -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = new(bool) - **dst = section.Bool(key) -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigLoadError) Code() string { - return "SharedConfigLoadError" -} - -// Message is the description of the error -func (e SharedConfigLoadError) Message() string { - return fmt.Sprintf("failed to load config file, %s", e.Filename) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigLoadError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. -func (e SharedConfigLoadError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigProfileNotExistsError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistsError struct { - Profile string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigProfileNotExistsError) Code() string { - return "SharedConfigProfileNotExistsError" -} - -// Message is the description of the error -func (e SharedConfigProfileNotExistsError) Message() string { - return fmt.Sprintf("failed to get profile, %s", e.Profile) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigProfileNotExistsError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. 
-func (e SharedConfigProfileNotExistsError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - RoleARN string - SourceProfile string -} - -// Code is the short id of the error. -func (e SharedConfigAssumeRoleError) Code() string { - return "SharedConfigAssumeRoleError" -} - -// Message is the description of the error -func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf( - "failed to load assume role for %s, source profile %s has no shared credentials", - e.RoleARN, e.SourceProfile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e SharedConfigAssumeRoleError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Code is the short id of the error. -func (e CredentialRequiresARNError) Code() string { - return "CredentialRequiresARNError" -} - -// Message is the description of the error -func (e CredentialRequiresARNError) Message() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e CredentialRequiresARNError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. 
-func (e CredentialRequiresARNError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go deleted file mode 100644 index 07ea799fbd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,81 +0,0 @@ -package v4 - -import ( - "github.com/aws/aws-sdk-go/internal/strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// blacklist is a generic rule for blacklisting -type blacklist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go deleted file mode 100644 index 6aa2ed241b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go +++ /dev/null @@ -1,7 +0,0 @@ -package v4 - -// WithUnsignedPayload will enable and set the UnsignedPayload field to -// true of the signer. 
-func WithUnsignedPayload(v4 *Signer) { - v4.UnsignedPayload = true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go deleted file mode 100644 index f35fc860b3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return aws.BackgroundContext() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go deleted file mode 100644 index fed5c859ca..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return r.Context() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go deleted file mode 100644 index 02cbd97e23..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go +++ /dev/null @@ -1,63 +0,0 @@ -package v4 - -import ( - "encoding/hex" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -type credentialValueProvider interface { - Get() (credentials.Value, error) -} - -// StreamSigner implements signing of event stream encoded payloads -type StreamSigner struct { - region string - service string - - credentials credentialValueProvider - - prevSig []byte -} - -// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages -func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { - return &StreamSigner{ - region: region, - service: service, - credentials: credentials, - prevSig: seedSignature, - } -} - -// GetSignature takes an event stream encoded headers and payload and returns a signature -func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { - credValue, err := s.credentials.Get() - if err != nil { - return nil, err - } - - sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) - - keyPath := buildSigningScope(s.region, s.service, date) - - stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) - - signature := hmacSHA256(sigKey, []byte(stringToSign)) - s.prevSig = signature - - return signature, nil -} - -func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - formatTime(date), - scope, - hex.EncodeToString(prevSig), - hex.EncodeToString(hashSHA256(headers)), - hex.EncodeToString(hashSHA256(payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go deleted file mode 100644 index bd082e9d1f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build go1.5 - -package v4 - -import ( - "net/url" - "strings" -) - -func getURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = 
u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go deleted file mode 100644 index d71f7b3f4f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ /dev/null @@ -1,846 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. -// -// Standalone Signer -// -// Generally using the signer outside of the SDK should not require any additional -// logic when using Go v1.5 or higher. The signer does this by taking advantage -// of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. -// -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. If you're using Go v1.4 you must set -// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with -// Go v1.5 the signer will fallback to URL.Path. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath -// -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. -// -// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 -// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the -// request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP -// message. URL.Opaque generally will force Go to make requests with absolute URL. -// URL.RawPath does not do this, but RawPath must be a valid escaping of Path -// or url.EscapedPath will ignore the RawPath escaping. -// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. 
-package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authorizationHeader = "Authorization" - authHeaderSignatureElem = "Signature=" - signatureQueryKey = "X-Amz-Signature" - - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - awsV4Request = "aws4_request" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - authorizationHeader: struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. -var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. 
See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. If there the logger - // is nil, nothing will be logged. - Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // Disables the automatical setting of the HTTP request's Body field with the - // io.ReadSeeker passed in to the signer. This is useful if you're using a - // custom wrapper around the body for the io.ReadSeeker and want to preserve - // the Body value on the Request.Body. - // - // This does run the risk of signing a request with a body that will not be - // sent in the request. Need to ensure that the underlying data of the Body - // values are the same. - DisableRequestBodyOverwrite bool - - // currentTimeFn returns the time value which represents the current time. - // This value should only be used for testing. If it is nil the default - // time.Now will be used. - currentTimeFn func() time.Time - - // UnsignedPayload will prevent signing of the payload. This will only - // work for services that have support for this. - UnsignedPayload bool -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credentials, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - DisableURIPathEscaping bool - - credValues credentials.Value - isPresign bool - unsignedPayload bool - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign will set the request's Body to be the `body` parameter passed in. If -// the body is not already an io.ReadCloser, it will be wrapped within one. 
If -// a `nil` body parameter passed to Sign, the request's Body field will be -// also set to nil. Its important to note that this functionality will not -// change the request's ContentLength of the request. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, false, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. 
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, true, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { - currentTimeFn := v4.currentTimeFn - if currentTimeFn == nil { - currentTimeFn = time.Now - } - - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: isPresign, - ServiceName: service, - Region: region, - DisableURIPathEscaping: v4.DisableURIPathEscaping, - unsignedPayload: v4.UnsignedPayload, - } - - for key := range ctx.Query { - sort.Strings(ctx.Query[key]) - } - - if ctx.isRequestSigned() { - ctx.Time = currentTimeFn() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) - if err != nil { - return http.Header{}, err - } - - ctx.sanitizeHostForHeader() - ctx.assignAmzQueryValues() - if err := ctx.build(v4.DisableHeaderHoisting); err != nil { - return nil, err - } - - // If the request is not presigned the body should be attached to it. This - // prevents the confusion of wanting to send a signed request without - // the body the request was signed for attached. - if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { - var reader io.ReadCloser - if body != nil { - var ok bool - if reader, ok = body.(io.ReadCloser); !ok { - reader = ioutil.NopCloser(body) - } - } - r.Body = reader - } - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) sanitizeHostForHeader() { - request.SanitizeHostForHeader(ctx.Request) -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = request.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: SignSDKRequest, -} - -// SignSDKRequest signs an AWS request with the V4 signature. This -// request handler should only be used with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. 
-// -// If the credentials of the request's config are set to -// credentials.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now) -} - -// BuildNamedHandler will build a generic handler for signing. -func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { - return request.NamedHandler{ - Name: name, - Fn: func(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now, opts...) - }, - } -} - -// SignSDKRequestWithCurrentTime will sign the SDK's request using the time -// function passed in. Behaves the same as SignSDKRequest with the exception -// the request is signed with the value returned by the current time function. -func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. 
- v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - curTime := curTimeFn() - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, curTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTime -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) error { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - if err := ctx.buildBodyDigest(); err != nil { - return err - } - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - authHeaderSignatureElem + ctx.signature, - } - ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) - } - - return nil -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. 
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func (ctx *signingCtx) buildTime() { - if ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - if !r.IsValid(k) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
- continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host - } else { - headerValues[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") - } - } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - formatTime(ctx.Time), - ctx.credentialString, - hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) - signature := hmacSHA256(creds, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() error { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - includeSHA256Header := ctx.unsignedPayload || - ctx.ServiceName == "s3" || - ctx.ServiceName == "glacier" - - s3Presign := ctx.isPresign && ctx.ServiceName == "s3" - - if ctx.unsignedPayload || s3Presign { - hash = "UNSIGNED-PAYLOAD" - includeSHA256Header = !s3Presign - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - if !aws.IsReaderSeekable(ctx.Body) { - return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) - } - hashBytes, err := makeSha256Reader(ctx.Body) - if err != nil { - return err - } - hash = hex.EncodeToString(hashBytes) - } - - if includeSHA256Header { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash - - return nil -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
-func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func hmacSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func hashSHA256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { - hash := sha256.New() - start, err := reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - return nil, err - } - defer func() { - // ensure error is return if unable to seek back to start of payload. - _, err = reader.Seek(start, sdkio.SeekStart) - }() - - // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies - // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. - size, err := aws.SeekerLen(reader) - if err != nil { - io.Copy(hash, reader) - } else { - io.CopyN(hash, reader, size) - } - - return hash.Sum(nil), nil -} - -const doubleSpace = " " - -// stripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func stripExcessSpaces(vals []string) { - var j, k, l, m, spaces int - for i, str := range vals { - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - vals[i] = str - continue - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - vals[i] = string(buf[:m]) - } -} - -func buildSigningScope(region, service string, dt time.Time) string { - return strings.Join([]string{ - formatShortTime(dt), - region, - service, - awsV4Request, - }, "/") -} - -func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { - kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) - kRegion := hmacSHA256(kDate, []byte(region)) - kService := hmacSHA256(kRegion, []byte(service)) - signingKey := hmacSHA256(kService, []byte(awsV4Request)) - return signingKey -} - -func formatShortTime(dt time.Time) string { - return dt.UTC().Format(shortTimeFormat) -} - -func formatTime(dt time.Time) string { - return dt.UTC().Format(timeFormat) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 98751ee84f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,264 +0,0 @@ -package aws - -import ( - "io" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API -// operation's input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if the operation -// requires payload signing. 
-// -// Note: If using With S3 PutObject to stream an object upload The SDK's S3 -// Upload manager (s3manager.Uploader) provides support for streaming with the -// ability to retry network errors. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// IsReaderSeekable returns if the underlying reader type can be seeked. A -// io.Reader might not actually be seekable if it is the ReaderSeekerCloser -// type. -func IsReaderSeekable(r io.Reader) bool { - switch v := r.(type) { - case ReaderSeekerCloser: - return v.IsSeeker() - case *ReaderSeekerCloser: - return v.IsSeeker() - case io.ReadSeeker: - return true - default: - return false - } -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// GetLen returns the length of the bytes remaining in the underlying reader. -// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return seekerLen(s) - } - - return -1, nil -} - -// SeekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func SeekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
- switch v := s.(type) { - case ReaderSeekerCloser: - return v.GetLen() - case *ReaderSeekerCloser: - return v.GetLen() - } - - return seekerLen(s) -} - -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, sdkio.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} - -// MultiCloser is a utility to close multiple io.Closers within a single -// statement. -type MultiCloser []io.Closer - -// Close closes all of the io.Closers making up the MultiClosers. Any -// errors that occur while closing will be returned in the order they -// occur. -func (m MultiCloser) Close() error { - var errs errors - for _, c := range m { - err := c.Close() - if err != nil { - errs = append(errs, err) - } - } - if len(errs) != 0 { - return errs - } - - return nil -} - -type errors []error - -func (es errors) Error() string { - var parts []string - for _, e := range es { - parts = append(parts, e.Error()) - } - - return strings.Join(parts, "\n") -} - -// CopySeekableBody copies the seekable body to an io.Writer -func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // copy errors may be assumed to be from the body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - // seek back to the first position after reading to reset - // the body for transmission. 
- _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index 6192b2455b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. -func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 0210d2720e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index 28e186602a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.31.12" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go deleted file mode 100644 index 876dcb3fde..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build !go1.7 - -package context - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case BackgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -// BackgroundCtx is the common base context. 
-var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go deleted file mode 100644 index e83a99886b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. -// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. -func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. 
-var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go deleted file mode 100644 index 0895d53cbe..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go deleted file mode 100644 index 0b76999ba1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. -func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go deleted file mode 100644 index 25ce0fe134..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// Grammar: -// stmt -> value stmt' -// stmt' -> epsilon | op stmt -// value -> number | string | boolean | quoted_string -// -// section -> [ section' -// section' -> value section_close -// section_close -> ] -// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go deleted file mode 100644 index 04345a54c2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go deleted file mode 100644 index 91ba2a59dd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. 
-// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go deleted file mode 100644 index 8d462f77e2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go deleted file mode 100644 index 3b0ca7afe3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go +++ /dev/null @@ -1,51 +0,0 @@ -package ini - -import ( - "io" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// OpenFile takes a path to a given file, and will open and parse -// that file. -func OpenFile(path string) (Sections, error) { - f, err := os.Open(path) - if err != nil { - return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) - } - defer f.Close() - - return Parse(f) -} - -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader) (Sections, error) { - tree, err := ParseAST(f) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. -func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go deleted file mode 100644 index 582c024ad1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go +++ /dev/null @@ -1,165 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // ErrCodeUnableToReadFile is used when a file is failed to be - // opened or read from. - ErrCodeUnableToReadFile = "FailedRead" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. 
-func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. -type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go deleted file mode 100644 index cf9fad81e7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ /dev/null @@ -1,356 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// State enums for the parse table -const ( - InvalidState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. 
-var parseTable = map[ASTKind]map[TokenType]int{ - ASTKindStart: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: map[TokenType]int{ - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - }, - ASTKindStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: map[TokenType]int{ - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: map[TokenType]int{ - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. -func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - // if should skip is true, we skip the tokens until should skip is set to false. - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. 
- if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. - stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - // - // This grammar occurs when the RHS is a number, word, or quoted string. - // equal_expr -> lit op equal_expr' - // equal_expr' -> number | string | quoted_string - // quoted_string -> " quoted_string' - // quoted_string' -> string quoted_string_end - // quoted_string_end -> " - // - // otherwise - // expr_stmt -> equal_expr (expr_stmt')* - // expr_stmt' -> ws S | op S | MarkComplete - // S -> equal_expr' expr_stmt' - switch k.Kind { - case ASTKindEqualExpr: - // assigning a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) - - } - - children[len(children)-1] = rhs - k.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - // If OpenScopeState is not at the start, we must mark the previous ast as complete - // - // for example: if previous ast was a skip statement; - // we should mark it as complete before we create a new statement - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) 
- stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k, tok.Type())) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) - } - - // returns a sublist which excludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. -func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go deleted file mode 100644 index 24df543d38..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ /dev/null @@ -1,324 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = runeCompare(v.raw, runesTrue) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// Append will append values and change the type to a string -// type. -func (v *Value) Append(tok Token) { - r := tok.Raw() - if v.Type != QuotedStringType { - v.Type = StringType - r = tok.raw[1 : len(tok.raw)-1] - } - if tok.Type() != TokenLit { - v.raw = append(v.raw, tok.Raw()...) - } else { - v.raw = append(v.raw, r...) 
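One detail of newValue above that is easy to miss: for non-decimal integer literals the two-character base prefix is stripped before strconv sees the digits. A hedged, self-contained sketch of that conversion path (parseIntLiteral is a made-up name for illustration):

    package inisketch

    import "strconv"

    // parseIntLiteral mirrors the IntegerType branch of newValue: drop the
    // "0b"/"0o"/"0x" prefix when the detected base is not 10, then parse the
    // remaining digits in that base.
    func parseIntLiteral(raw string, base int) (int64, error) {
        if base != 10 {
            raw = raw[2:]
        }
        return strconv.ParseInt(raw, base, 64)
    }

    // parseIntLiteral("0x2A", 16) == 42
    // parseIntLiteral("0b101", 2) == 5
    // parseIntLiteral("42", 10)   == 42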
- } -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go deleted file mode 100644 index e52ac399f1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc566..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - 
-type numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7cbe0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go deleted file mode 100644 index 4572870193..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import "fmt" - -const ( - // ErrCodeParseError is returned when a parsing error - // has occurred. - ErrCodeParseError = "INIParseError" -) - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. -func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -// Code will return the ErrCodeParseError -func (err *ParseError) Code() string { - return ErrCodeParseError -} - -// Message returns the error's message -func (err *ParseError) Message() string { - return err.msg -} - -// OrigError return nothing since there will never be any -// original error. -func (err *ParseError) OrigError() error { - return nil -} - -func (err *ParseError) Error() string { - return fmt.Sprintf("%s: %s", err.Code(), err.Message()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7c70..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. 
-func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go deleted file mode 100644 index f82095ba25..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go deleted file mode 100644 index da7a4049cf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - // should skip state will be modified only if previous token was new line (NL); - // and the current token is not WhiteSpace (WS). - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - s.Continue() - return false - } - s.prevTok = tok - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true -} - -func (s *skipper) Continue() { - s.shouldSkip = false - // empty token is assigned as we return to default state, when should skip is false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go deleted file mode 100644 index 18f3fe8931..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. 
-func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini definition. -// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go deleted file mode 100644 index 305999d29b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read -// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) 
- i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. 
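getStringValue above scans to the first unescaped quote, and the escape helpers that follow below (isEscaped, getEscapedByte, removeEscapedCharacters) decode the body when the value is read back. A hedged end-to-end sketch of that behaviour, again using the SDK-internal import purely for illustration:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/internal/ini" // illustration only: internal package
    )

    func main() {
        src := []byte("[default]\n" + `name = "us \"west\" 2"` + "\n")

        sections, err := ini.ParseBytes(src)
        if err != nil {
            panic(err)
        }

        sec, _ := sections.GetSection("default")
        // Quoted values keep everything between the quotes, with \" unescaped:
        fmt.Println(sec.String("name")) // us "west" 2
    }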
-func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go deleted file mode 100644 index 94841c3244..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ /dev/null @@ -1,166 +0,0 @@ -package ini - -import ( - "fmt" - "sort" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. -type DefaultVisitor struct { - scope string - Sections Sections -} - -// NewDefaultVisitor return a DefaultVisitor -func NewDefaultVisitor() *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - } -} - -// VisitExpr visits expressions... -func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - if rhs.Root.Type() != TokenLit { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - t.values[key] = v - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... 
-func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - v.Sections.container[name] = Section{} - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - Name string - values values -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go deleted file mode 100644 index 99915f7f77..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. -func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae06f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. 
-func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go deleted file mode 100644 index 6c443988bb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go deleted file mode 100644 index 5aa9137e0f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !go1.7 - -package sdkio - -// Copy of Go 1.7 io package's Seeker constants. -const ( - SeekStart = 0 // seek relative to the origin of the file - SeekCurrent = 1 // seek relative to the current offset - SeekEnd = 2 // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go deleted file mode 100644 index e5f005613b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.7 - -package sdkio - -import "io" - -// Alias for Go 1.7 io package Seeker constants -const ( - SeekStart = io.SeekStart // seek relative to the origin of the file - SeekCurrent = io.SeekCurrent // seek relative to the current offset - SeekEnd = io.SeekEnd // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go deleted file mode 100644 index 44898eed0f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.10 - -package sdkmath - -import "math" - -// Round returns the nearest integer, rounding half away from zero. -// -// Special cases are: -// Round(±0) = ±0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64) float64 { - return math.Round(x) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go deleted file mode 100644 index 810ec7f08b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !go1.10 - -package sdkmath - -import "math" - -// Copied from the Go standard library's (Go 1.12) math/floor.go for use in -// Go version prior to Go 1.10. -const ( - uvone = 0x3FF0000000000000 - mask = 0x7FF - shift = 64 - 11 - 1 - bias = 1023 - signMask = 1 << 63 - fracMask = 1<= 0.5 { - // return t + Copysign(1, x) - // } - // return t - // } - bits := math.Float64bits(x) - e := uint(bits>>shift) & mask - if e < bias { - // Round abs(x) < 1 including denormals. - bits &= signMask // +-0 - if e == bias-1 { - bits |= uvone // +-1 - } - } else if e < bias+shift { - // Round any abs(x) >= 1 containing a fractional component [0,1). 
- // - // Numbers with larger exponents are returned unchanged since they - // must be either an integer, infinity, or NaN. - const half = 1 << (shift - 1) - e -= bias - bits += half >> e - bits &^= fracMask >> e - } - return math.Float64frombits(bits) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go deleted file mode 100644 index 0c9802d877..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go +++ /dev/null @@ -1,29 +0,0 @@ -package sdkrand - -import ( - "math/rand" - "sync" - "time" -) - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// SeededRand is a new RNG using a thread safe implementation of rand.Source -var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go deleted file mode 100644 index f4651da2da..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.6 - -package sdkrand - -import "math/rand" - -// Read provides the stub for math.Rand.Read method support for go version's -// 1.6 and greater. -func Read(r *rand.Rand, p []byte) (int, error) { - return r.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go deleted file mode 100644 index b1d93a33d4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.6 - -package sdkrand - -import "math/rand" - -// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 -func Read(r *rand.Rand, p []byte) (n int, err error) { - // Copy of Go standard libraries math package's read function not added to - // standard library until Go 1.6. - var pos int8 - var val int64 - for n = 0; n < len(p); n++ { - if pos == 0 { - val = r.Int63() - pos = 7 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - - return n, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go deleted file mode 100644 index 38ea61afea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go +++ /dev/null @@ -1,23 +0,0 @@ -package sdkuri - -import ( - "path" - "strings" -) - -// PathJoin will join the elements of the path delimited by the "/" -// character. Similar to path.Join with the exception the trailing "/" -// character is preserved if present. -func PathJoin(elems ...string) string { - if len(elems) == 0 { - return "" - } - - hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") - str := path.Join(elems...) 
- if hasTrailing && str != "/" { - str += "/" - } - - return str -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go deleted file mode 100644 index 7da8a49ce5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ /dev/null @@ -1,12 +0,0 @@ -package shareddefaults - -const ( - // ECSCredsProviderEnvVar is an environmental variable key used to - // determine which path needs to be hit. - ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" -) - -// ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overridden to test to ensure the credential process -// is behaving correctly. -var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index ebcbc2b40a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package shareddefaults - -import ( - "os" - "path/filepath" - "runtime" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go deleted file mode 100644 index d008ae27cb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
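Stepping back to sdkuri.PathJoin above: the only difference from the standard library's path.Join is that a trailing "/" on the last element survives the join, which matters for endpoints that are sensitive to it. A small sketch contrasting the two (the path segments are invented for illustration):

    package main

    import (
        "fmt"
        "path"

        "github.com/aws/aws-sdk-go/internal/sdkuri" // illustration only: internal package
    )

    func main() {
        fmt.Println(path.Join("2018-06-01", "runtime", "invocation/"))       // 2018-06-01/runtime/invocation
        fmt.Println(sdkuri.PathJoin("2018-06-01", "runtime", "invocation/")) // 2018-06-01/runtime/invocation/
    }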
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index 14ad0c5891..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package singleflight provides a duplicate function call suppression -// mechanism. -package singleflight - -import "sync" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. 
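Before the Do implementation below, a hedged sketch of how Group is meant to be used: callers that ask for the same key while a call is in flight wait for that call instead of repeating it. fetchCredentials is a hypothetical expensive function, and the import path is the SDK's vendored copy, shown for illustration only.

    package main

    import (
        "sync"

        "github.com/aws/aws-sdk-go/internal/sync/singleflight" // illustration only: internal package
    )

    // fetchCredentials stands in for an expensive call worth de-duplicating.
    func fetchCredentials() (interface{}, error) { return "creds", nil }

    func main() {
        var g singleflight.Group
        var wg sync.WaitGroup

        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                // Goroutines arriving while the first call is still running block on
                // it and receive the same result; shared reports that de-duplication.
                v, err, shared := g.Do("creds", fetchCredentials)
                _, _, _ = v, err, shared
            }()
        }
        wg.Wait()
    }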
-func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - c.val, c.err = fn() - c.wg.Done() - - g.mu.Lock() - if !c.forgotten { - delete(g.m, key) - } - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - g.mu.Unlock() -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go deleted file mode 100644 index d7d42db0a6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ /dev/null @@ -1,68 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. -var ValidateEndpointHostHandler = request.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *request.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(opName, host string) error { - paramErrs := request.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") - - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(request.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(host) > 255 { - paramErrs.Add(request.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. 
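A quick sketch of what the host validation above accepts and rejects (ValidHostLabel's body follows below). These functions live in the SDK's private/protocol package shown in this hunk; the operation and host names are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol"
    )

    func main() {
        // A label is 1-63 characters of [A-Za-z0-9-]; hosts are checked label by
        // label and a trailing dot (FQDN form) is tolerated.
        fmt.Println(protocol.ValidHostLabel("my-bucket")) // true
        fmt.Println(protocol.ValidHostLabel("my_bucket")) // false: '_' is not allowed

        fmt.Println(protocol.ValidateEndpointHost("PutObject", "my-bucket.s3.amazonaws.com.")) // <nil>
        fmt.Println(protocol.ValidateEndpointHost("PutObject", "bad_host.amazonaws.com"))      // invalid parameter error
    }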
-func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go deleted file mode 100644 index 915b0fcafd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go +++ /dev/null @@ -1,54 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - - return request.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. -type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. -func (h HostPrefixBuilder) Build(r *request.Request) { - if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { - return - } - - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831dff98..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. 
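The host prefix expansion performed by HostPrefixBuilder.Build earlier in this hunk is a plain template substitution over the configured labels. A self-contained sketch of the same logic (the Bucket label value is made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        prefix := "{Bucket}."
        labels := map[string]string{"Bucket": "my-bucket"} // hypothetical label values

        // The same substitution Build applies before prepending to the request host.
        for name, value := range labels {
            prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
        }

        fmt.Println(prefix + "s3.us-west-2.amazonaws.com") // my-bucket.s3.us-west-2.amazonaws.com
    }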
-// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 864fb6704b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. 
- field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.UnixTimeFormatName - } - - ts := protocol.FormatTime(format, converted) - if format != protocol.UnixTimeFormatName { - ts = `"` + ts + `"` - } - - buf.WriteString(ts) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. 
- enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - case aws.JSONValue: - str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) - if err != nil { - return fmt.Errorf("unable to encode JSONValue, %v", err) - } - buf.WriteString(str) - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -var hex = "0123456789abcdef" - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for i := 0; i < len(s); i++ { - if s[i] == '"' { - buf.WriteString(`\"`) - } else if s[i] == '\\' { - buf.WriteString(`\\`) - } else if s[i] == '\b' { - buf.WriteString(`\b`) - } else if s[i] == '\f' { - buf.WriteString(`\f`) - } else if s[i] == '\r' { - buf.WriteString(`\r`) - } else if s[i] == '\t' { - buf.WriteString(`\t`) - } else if s[i] == '\n' { - buf.WriteString(`\n`) - } else if s[i] < 32 { - buf.WriteString("\\u00") - buf.WriteByte(hex[s[i]>>4]) - buf.WriteByte(hex[s[i]&0xF]) - } else { - buf.WriteByte(s[i]) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index 5e9499699b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,282 +0,0 @@ -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in -// type. The value to unmarshal the json document into must be a pointer to the -// type. -func UnmarshalJSONError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := json.NewDecoder(body).Decode(v) - if err != nil { - msg := "failed decoding error message" - if err == io.EOF { - msg = "error message missing" - err = nil - } - return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) - } - - return nil -} - -// UnmarshalJSON reads a stream and unmarshals the results in object v. -func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") -} - -// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the -// object v. Ignores casing for structure members. 
-func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{ - caseInsensitive: true, - }.unmarshalAny(reflect.ValueOf(v), out, "") -} - -type unmarshaler struct { - caseInsensitive bool -} - -func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return u.unmarshalStruct(value, data, tag) - case "list": - return u.unmarshalList(value, data, tag) - case "map": - return u.unmarshalMap(value, data, tag) - default: - return u.unmarshalScalar(value, data, tag) - } -} - -func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if u.caseInsensitive { - if _, ok := mapData[name]; !ok { - // Fallback to uncased name search if the exact name didn't match. 
- for kn, v := range mapData { - if strings.EqualFold(kn, name) { - mapData[name] = v - } - } - } - } - - member := value.FieldByIndex(field.Index) - err := u.unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := u.unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - u.unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. - v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case *time.Time: - // Time unmarshaled from a float64 can only be epoch seconds - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go deleted file mode 100644 index 776d110184..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go +++ /dev/null @@ -1,76 +0,0 @@ -package protocol - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strconv" - - "github.com/aws/aws-sdk-go/aws" -) - -// EscapeMode is the mode that should be use for escaping a value -type EscapeMode uint - -// The modes for escaping a value before it is marshaled, and unmarshaled. 
-const ( - NoEscape EscapeMode = iota - Base64Escape - QuotedEscape -) - -// EncodeJSONValue marshals the value into a JSON string, and optionally base64 -// encodes the string before returning it. -// -// Will panic if the escape mode is unknown. -func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - switch escape { - case NoEscape: - return string(b), nil - case Base64Escape: - return base64.StdEncoding.EncodeToString(b), nil - case QuotedEscape: - return strconv.Quote(string(b)), nil - } - - panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) -} - -// DecodeJSONValue will attempt to decode the string input as a JSONValue. -// Optionally decoding base64 the value first before JSON unmarshaling. -// -// Will panic if the escape mode is unknown. -func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { - var b []byte - var err error - - switch escape { - case NoEscape: - b = []byte(v) - case Base64Escape: - b, err = base64.StdEncoding.DecodeString(v) - case QuotedEscape: - var u string - u, err = strconv.Unquote(v) - b = []byte(u) - default: - panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) - } - - if err != nil { - return nil, err - } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go deleted file mode 100644 index 0ea0647a57..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ /dev/null @@ -1,81 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// PayloadUnmarshaler provides the interface for unmarshaling a payload's -// reader into a SDK shape. -type PayloadUnmarshaler interface { - UnmarshalPayload(io.Reader, interface{}) error -} - -// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a -// HandlerList. This provides the support for unmarshaling a payload reader to -// a shape without needing a SDK request first. -type HandlerPayloadUnmarshal struct { - Unmarshalers request.HandlerList -} - -// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using -// the Unmarshalers HandlerList provided. Returns an error if unable -// unmarshaling fails. -func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { - req := &request.Request{ - HTTPRequest: &http.Request{}, - HTTPResponse: &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: ioutil.NopCloser(r), - }, - Data: v, - } - - h.Unmarshalers.Run(req) - - return req.Error -} - -// PayloadMarshaler provides the interface for marshaling a SDK shape into and -// io.Writer. -type PayloadMarshaler interface { - MarshalPayload(io.Writer, interface{}) error -} - -// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. -// This provides support for marshaling a SDK shape into an io.Writer without -// needing a SDK request first. -type HandlerPayloadMarshal struct { - Marshalers request.HandlerList -} - -// MarshalPayload marshals the SDK shape into the io.Writer using the -// Marshalers HandlerList provided. Returns an error if unable if marshal -// fails. 
-func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { - req := request.New( - aws.Config{}, - metadata.ClientInfo{}, - request.Handlers{}, - nil, - &request.Operation{HTTPMethod: "PUT"}, - v, - nil, - ) - - h.Marshalers.Run(req) - - if req.Error != nil { - return req.Error - } - - io.Copy(w, req.GetBody()) - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go deleted file mode 100644 index 9d521dcb95..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go +++ /dev/null @@ -1,49 +0,0 @@ -package protocol - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RequireHTTPMinProtocol request handler is used to enforce that -// the target endpoint supports the given major and minor HTTP protocol version. -type RequireHTTPMinProtocol struct { - Major, Minor int -} - -// Handler will mark the request.Request with an error if the -// target endpoint did not connect with the required HTTP protocol -// major and minor version. -func (p RequireHTTPMinProtocol) Handler(r *request.Request) { - if r.Error != nil || r.HTTPResponse == nil { - return - } - - if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } - - if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } -} - -// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint -// did not match the required HTTP major and minor protocol version. -const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" - -func newMinHTTPProtoError(major, minor int, r *request.Request) error { - return awserr.NewRequestFailure( - awserr.New("MinimumHTTPProtocolError", - fmt.Sprintf( - "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - major, minor, r.HTTPResponse.Proto, - ), - nil, - ), - r.HTTPResponse.StatusCode, r.RequestID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index d40346a779..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. 
-func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) - return - } - - if !r.IsPresigned() { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 75866d0121..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,246 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. -func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." 
+ name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - if _, ok := value.Interface().([]byte); ok { - return q.parseScalar(v, value, prefix, tag) - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. - mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - v.Set(name, protocol.FormatTime(format, value)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index 9231e95d16..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,39 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
-func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go deleted file mode 100644 index 831b0110c5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "encoding/xml" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -type xmlErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlResponseError struct { - xmlErrorResponse -} - -func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - const svcUnavailableTagName = "ServiceUnavailableException" - const errorResponseTagName = "ErrorResponse" - - switch start.Name.Local { - case svcUnavailableTagName: - e.Code = svcUnavailableTagName - e.Message = "service is unavailable" - return d.Skip() - - case errorResponseTagName: - return d.DecodeElement(&e.xmlErrorResponse, &start) - - default: - return fmt.Errorf("unknown error response tag, %v", start) - } -} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var respErr xmlResponseError - err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - reqID := respErr.RequestID - if len(reqID) == 0 { - reqID = r.RequestID - } - - r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 1301b149d3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -var byteSliceType = reflect.TypeOf([]byte{}) - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. -func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if kind := m.Kind(); kind == reflect.Ptr { - m = m.Elem() - } else if kind == reflect.Interface { - if !m.Elem().IsValid() { - continue - } - } - if !m.IsValid() { - continue - } - if field.Tag.Get("ignore") != "" { - continue - } - - // Support the ability to customize values to be marshaled as a - // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as stirng but - // required to be base64 encoded in request. 
- if field.Tag.Get("marshal-as") == "blob" { - m = m.Convert(byteSliceType) - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - name = strings.TrimSpace(name) - str = strings.TrimSpace(str) - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - - } - keyStr := strings.TrimSpace(key.String()) - str = strings.TrimSpace(str) - - header.Add(prefix+keyStr, str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else 
if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - str = protocol.FormatTime(format, value) - case aws.JSONValue: - if len(value) == 0 { - return "", errValueNotSet - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) - if err != nil { - return "", fmt.Errorf("unable to encode JSONValue, %v", err) - } - default: - err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2e1e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". 
-func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 92f8b4d9a4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,257 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - awsStrings "github.com/aws/aws-sdk-go/internal/strings" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - if err := unmarshalBody(r, v); err != nil { - r.Error = err - } - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { - r.Error = err - } - } -} - -// UnmarshalResponse attempts to unmarshal the REST response headers to -// the data type passed in. The type must be a pointer. An error is returned -// with any error unmarshaling the response into the target datatype. 
-func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { - v := reflect.Indirect(reflect.ValueOf(data)) - return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) -} - -func unmarshalBody(r *request.Request, v reflect.Value) error { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - payload.Set(reflect.ValueOf(b)) - - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - str := string(b) - payload.Set(reflect.ValueOf(&str)) - - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to read response body", err) - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() - return awserr.New(request.ErrCodeSerialization, - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } - - return nil -} - -func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, resp.StatusCode) - - case "header": - err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) - if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - } - } - } - - return nil -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { - if len(headers) == 0 { - return nil - } - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - if awsStrings.HasPrefixFold(k, prefix) { - if normalize == true { - k = strings.ToLower(k) - } else { - k = http.CanonicalHeaderKey(k) - } - out[k[len(prefix):]] = &v[0] - } - } - if len(out) != 0 { - r.Set(reflect.ValueOf(out)) - } - - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { - switch tag.Get("type") { - case "jsonvalue": - if len(header) == 0 { - return nil - } - case "blob": - if len(header) == 0 { - return nil - } - default: - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - } - t, err := protocol.ParseTime(format, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - m, err := protocol.DecodeJSONValue(header, escaping) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go deleted file mode 100644 index 05d4ff5192..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ /dev/null @@ -1,84 +0,0 @@ -package protocol - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/internal/sdkmath" -) - -// Names of time formats supported by the SDK -const ( - RFC822TimeFormatName = "rfc822" - ISO8601TimeFormatName = "iso8601" - UnixTimeFormatName = "unixTimestamp" -) - -// Time formats supported by the SDK -// Output time is intended to not contain decimals -const ( - // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT - RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" - - // This format is used for output time without seconds precision - RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - - // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - - // This format is used for output time without seconds precision - ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" -) - -// IsKnownTimestampFormat returns if the timestamp format name -// is know to the SDK's protocols. -func IsKnownTimestampFormat(name string) bool { - switch name { - case RFC822TimeFormatName: - fallthrough - case ISO8601TimeFormatName: - fallthrough - case UnixTimeFormatName: - return true - default: - return false - } -} - -// FormatTime returns a string value of the time. -func FormatTime(name string, t time.Time) string { - t = t.UTC() - - switch name { - case RFC822TimeFormatName: - return t.Format(RFC822OutputTimeFormat) - case ISO8601TimeFormatName: - return t.Format(ISO8601OutputTimeFormat) - case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) - default: - panic("unknown timestamp format name, " + name) - } -} - -// ParseTime attempts to parse the time given the format. Returns -// the time if it was able to be parsed, and fails otherwise. 
-func ParseTime(formatName, value string) (time.Time, error) { - switch formatName { - case RFC822TimeFormatName: - return time.Parse(RFC822TimeFormat, value) - case ISO8601TimeFormatName: - return time.Parse(ISO8601TimeFormat, value) - case UnixTimeFormatName: - v, err := strconv.ParseFloat(value, 64) - _, dec := math.Modf(v) - dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 - if err != nil { - return time.Time{}, err - } - return time.Unix(int64(v), int64(dec*(1e9))), nil - default: - panic("unknown timestamp format name, " + formatName) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index f614ef898b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,27 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} - -// ResponseMetadata provides the SDK response metadata attributes. -type ResponseMetadata struct { - StatusCode int - RequestID string -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go deleted file mode 100644 index cc857f136c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go +++ /dev/null @@ -1,65 +0,0 @@ -package protocol - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalErrorHandler provides unmarshaling errors API response errors for -// both typed and untyped errors. -type UnmarshalErrorHandler struct { - unmarshaler ErrorUnmarshaler -} - -// ErrorUnmarshaler is an abstract interface for concrete implementations to -// unmarshal protocol specific response errors. -type ErrorUnmarshaler interface { - UnmarshalError(*http.Response, ResponseMetadata) (error, error) -} - -// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler -// initialized for the set of exception names to the error unmarshalers -func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { - return &UnmarshalErrorHandler{ - unmarshaler: unmarshaler, - } -} - -// UnmarshalErrorHandlerName is the name of the named handler. -const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" - -// NamedHandler returns a NamedHandler for the unmarshaler using the set of -// errors the unmarshaler was initialized for. -func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { - return request.NamedHandler{ - Name: UnmarshalErrorHandlerName, - Fn: u.UnmarshalError, - } -} - -// UnmarshalError will attempt to unmarshal the API response's error message -// into either a generic SDK error type, or a typed error corresponding to the -// errors exception name. 
-func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - respMeta := ResponseMetadata{ - StatusCode: r.HTTPResponse.StatusCode, - RequestID: r.RequestID, - } - - v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - respMeta.StatusCode, - respMeta.RequestID, - ) - return - } - - r.Error = v -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index 09ad951595..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,315 +0,0 @@ -// Package xmlutil provides XML serialization of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. Error will be returned -// if the serialization of any of the params or nested values fails. -func BuildXML(params interface{}, e *xml.Encoder) error { - return buildXML(params, e, false) -} - -func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, sorted) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. -func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested -// types are converted to XMLNodes also. 
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - var payloadFields, nonPayloadFields int - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - nonPayloadFields++ - continue - } - payloadFields++ - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - } - - // Only case where the child shape is not added is if the shape only contains - // non-payload fields, e.g headers/query. - if !(payloadFields == 0 && nonPayloadFields > 0) { - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. -func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. 
-// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. -func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - str = protocol.FormatTime(format, converted) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go deleted file mode 100644 index c1a511851f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "strings" -) - -type xmlAttrSlice []xml.Attr - -func (x xmlAttrSlice) Len() int { - return len(x) -} - -func (x xmlAttrSlice) Less(i, j int) bool { - spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space - localI, localJ := x[i].Name.Local, x[j].Name.Local - valueI, valueJ := x[i].Value, x[j].Value - - spaceCmp := strings.Compare(spaceI, spaceJ) - localCmp := strings.Compare(localI, localJ) - valueCmp 
:= strings.Compare(valueI, valueJ) - - if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { - return true - } - - return false -} - -func (x xmlAttrSlice) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 107c053f8a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,299 +0,0 @@ -package xmlutil - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalXMLError unmarshals the XML error from the stream into the value -// type specified. The value must be a pointer. If the message fails to -// unmarshal, the message content will be included in the returned error as a -// awserr.UnmarshalError. -func UnmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return nil -} - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := r.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := r.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. 
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - if val, ok := node.findElem(name); ok { - elems = []*XMLNode{{Text: val}} - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. -func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from a XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScaller deserializes an XMLNode value into a concrete type based on the -// interface type of r. -// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type. 
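As a hedged counterpart for the decode path above, UnmarshalXML fills a hand-written shape straight from an xml.Decoder; parseStruct matches child elements against each field's locationName (the shape and input here are illustrative):

package main

import (
    "encoding/xml"
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// result mirrors the generated-shape style read by parseStruct.
type result struct {
    Name *string `locationName:"Name" type:"string"`
}

func main() {
    d := xml.NewDecoder(strings.NewReader("<Person><Name>Jane</Name></Person>"))

    var out result
    // An empty wrapper name means there is no wrapped result element to unwrap.
    if err := xmlutil.UnmarshalXML(&out, d, ""); err != nil {
        panic(err)
    }
    fmt.Println(*out.Name) // Jane
}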
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index 42f71648ee..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,159 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - child.parent = n - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
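The XMLNode tree is the intermediate form both directions share; decoding into it directly, without any target struct, looks roughly like this (illustrative input, SDK-internal package):

package main

import (
    "encoding/xml"
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
    d := xml.NewDecoder(strings.NewReader("<Person><Name>Jane</Name></Person>"))

    // XMLToStruct consumes the token stream into a generic XMLNode tree,
    // grouping children by their local element name.
    root, err := xmlutil.XMLToStruct(d, nil)
    if err != nil {
        panic(err)
    }
    person := root.Children["Person"][0]
    fmt.Println(person.Children["Name"][0].Text) // Jane
}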
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - // Sort Attributes - attrs := node.Attr - if sorted { - sortedAttrs := make([]xml.Attr, len(attrs)) - for _, k := range node.Attr { - sortedAttrs = append(sortedAttrs, k) - } - sort.Sort(xmlAttrSlice(sortedAttrs)) - attrs = sortedAttrs - } - - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) - - if node.Text != "" { - e.EncodeToken(xml.CharData([]byte(node.Text))) - } else if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(xml.EndElement{Name: node.Name}) - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go deleted file mode 100644 index 550b5f687f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ /dev/null @@ -1,3115 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opAssumeRole = "AssumeRole" - -// AssumeRoleRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRole operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRole for more information on using the AssumeRole -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { - op := &request.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output = &AssumeRoleOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRole API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials that you can use to access -// AWS resources that you might not normally have access to. These temporary -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use AssumeRole within your account or for cross-account -// access. For a comparison of AssumeRole with other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You cannot use AWS account root user credentials to call AssumeRole. You -// must use credentials for an IAM user or an IAM role to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account. -// Then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) -// in the IAM User Guide. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRole last -// for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. 
For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: You cannot call -// the AWS STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// To assume a role from a different account, your AWS account must be trusted -// by the role. The trust relationship is defined in the role's trust policy -// when the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: -// -// * Attach a policy to the user (identical to the previous user in a different -// account). -// -// * Add the user as a principal directly in the role's trust policy. -// -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These tags are -// called session tags. For more information about session tags, see Passing -// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. 
For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Using MFA with AssumeRole -// -// (Optional) You can include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios to ensure -// that the user that assumes the role has been authenticated with an AWS MFA -// device. In that scenario, the trust policy of the role being assumed includes -// a condition that tests for MFA authentication. If the caller does not include -// valid MFA information, the request to assume the role is denied. The condition -// in a trust policy that tests for MFA authentication might look like the following -// example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. -// -// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA device produces. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRole for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
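Since the AssumeRole documentation above covers session duration, session policies and MFA, a hedged usage sketch of the call as generated here (the role ARN, MFA serial number and token code are placeholders):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder ARN
        RoleSessionName: aws.String("example-session"),
        DurationSeconds: aws.Int64(900), // 15 minutes, the documented minimum
        // Only needed when the role's trust policy requires MFA:
        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
        TokenCode:    aws.String("123456"),                                     // placeholder TOTP
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(*out.Credentials.AccessKeyId)
}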
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - return out, req.Send() -} - -// AssumeRoleWithContext is the same as AssumeRole with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRole for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output = &AssumeRoleWithSAMLOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithSAML API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based AWS access -// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to AWS services. 
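A hedged sketch of the SAML flow introduced above; the ARNs and the base64 SAML assertion are placeholders, and, as documented, the call itself is made with anonymous credentials:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
        RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),            // placeholder
        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"), // placeholder
        SAMLAssertion: aws.String("base64-encoded-assertion-from-the-idp"),               // placeholder
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(*out.Credentials.Expiration)
}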
-// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. You can provide -// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour -// to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any AWS service with the following exception: you cannot -// call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. -// The identity of the caller is validated by using keys in the metadata document -// that is uploaded for the SAML provider entity for your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail -// logs. The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the persistent -// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML assertion -// as session tags. Each session tag consists of a key name and an associated -// value. 
For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plain text session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, session tags override the role's tags with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// SAML Configuration -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithSAML for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. 
-// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - return out, req.Send() -} - -// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithSAML for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" - -// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithWebIdentity operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithWebIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithWebIdentityInput{} - } - - output = &AssumeRoleWithWebIdentityOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the AWS SDK for iOS Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of AWS security -// credentials. Therefore, you can distribute an application (for example, on -// mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application. You also don't need to deploy -// server-based proxy services that use long-term AWS credentials. Instead, -// the identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. 
-// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service API operations. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithWebIdentity -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any AWS service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your web identity -// token as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plain text session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. 
Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, the session tag overrides the role tag with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Identities -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). -// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to AWS. -// -// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and -// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithWebIdentity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
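A hedged sketch of the web identity flow documented above (the role ARN and OIDC token are placeholders; like the SAML variant, the request is sent unsigned):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
        RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder
        RoleSessionName:  aws.String("example-session"),
        WebIdentityToken: aws.String("token-from-the-identity-provider"), // placeholder OIDC token
    })
    if err != nil {
        panic(err)
    }
    // SubjectFromWebIdentityToken carries the subject ("sub") claim of the token.
    fmt.Println(*out.SubjectFromWebIdentityToken)
}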
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - return out, req.Send() -} - -// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithWebIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the -// client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { - op := &request.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output = &DecodeAuthorizationMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecodeAuthorizationMessage API operation for AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an AWS request. -// -// For example, if a user is not authorized to perform an operation that he -// or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some AWS operations additionally return -// an encoded message that can provide details about this authorization failure. -// -// Only certain AWS operations return an encoded authorization message. The -// documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. 
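A hedged sketch of decoding such a message; the encoded string is a placeholder, and the caller needs the sts:DecodeAuthorizationMessage permission described below:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
        EncodedMessage: aws.String("encoded-message-from-a-403-response"), // placeholder
    })
    if err != nil {
        panic(err)
    }
    // DecodedMessage is a JSON document describing why the request was denied.
    fmt.Println(*out.DecodedMessage)
}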
-// -// The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the operation -// should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. -// -// The decoded message includes the following type of information: -// -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested action. -// -// * The requested resource. -// -// * The values of condition keys in the context of the user's request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation DecodeAuthorizationMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - return out, req.Send() -} - -// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of -// the ability to pass a context and additional request options. -// -// See DecodeAuthorizationMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetAccessKeyInfo = "GetAccessKeyInfo" - -// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessKeyInfo operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
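A minimal sketch of the DecodeAuthorizationMessage call described above, assuming the EncodedMessage/DecodedMessage fields of the input and output types defined elsewhere in this file; the encoded message string is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// decodeDenial expands the opaque message attached to a Client.UnauthorizedOperation
// response. The caller must hold the sts:DecodeAuthorizationMessage permission.
func decodeDenial(svc *sts.STS, encoded string) (string, error) {
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.DecodedMessage), nil
}

func main() {
	svc := sts.New(session.Must(session.NewSession()))
	decoded, err := decodeDenial(svc, "<encoded authorization message from a 403 response>") // placeholder
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(decoded) // details the denied action, principal, and resource
}
```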
-// -// -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { - op := &request.Operation{ - Name: opGetAccessKeyInfo, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetAccessKeyInfoInput{} - } - - output = &GetAccessKeyInfoOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetAccessKeyInfo API operation for AWS Security Token Service. -// -// Returns the account identifier for the specified access key ID. -// -// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) -// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). -// For more information about access keys, see Managing Access Keys for IAM -// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// AWS account to which the keys belong. Access key IDs beginning with AKIA -// are long-term credentials for an IAM user or the AWS account root user. Access -// key IDs beginning with ASIA are temporary credentials that are created using -// STS operations. If the account in the response belongs to you, you can sign -// in as the root user and review your root user access keys. Then, you can -// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might -// be active, inactive, or deleted. Active keys might not have permissions to -// perform an operation. Providing a deleted access key might return an error -// that the key doesn't exist. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetAccessKeyInfo for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - return out, req.Send() -} - -// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of -// the ability to pass a context and additional request options. -// -// See GetAccessKeyInfo for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
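A minimal sketch of the GetAccessKeyInfo operation documented above, using the example key ID from those docs; the AccessKeyId/Account field names are the ones generated elsewhere in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Map an access key ID (AKIA... = long-term, ASIA... = temporary) to its owning account.
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID from the docs above
	})
	if err != nil {
		fmt.Println("GetAccessKeyInfo failed:", err)
		return
	}
	// Note: the result says nothing about whether the key is active, inactive, or deleted.
	fmt.Println("key belongs to account:", aws.StringValue(out.Account))
}
```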
-func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCallerIdentity for more information on using the GetCallerIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCallerIdentityRequest method. -// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { - op := &request.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output = &GetCallerIdentityOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCallerIdentity API operation for AWS Security Token Service. -// -// Returns details about the IAM user or role whose credentials are used to -// call the operation. -// -// No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetCallerIdentity for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - return out, req.Send() -} - -// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See GetCallerIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetFederationToken for more information on using the GetFederationToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { - op := &request.Operation{ - Name: opGetFederationToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetFederationTokenInput{} - } - - output = &GetFederationTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetFederationToken API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. -// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. 
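A minimal sketch of GetCallerIdentity, the simplest operation in this file: it takes an empty input and, per the docs above, needs no extra permissions.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Reports whose credentials the client is currently using.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println("GetCallerIdentity failed:", err)
		return
	}
	fmt.Println("account:", aws.StringValue(out.Account))
	fmt.Println("arn:    ", aws.StringValue(out.Arn))
	fmt.Println("user id:", aws.StringValue(out.UserId))
}
```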
-// -// You can also call GetFederationToken using the security credentials of an -// AWS account root user, but we do not recommend it. Instead, we recommend -// that you create an IAM user for the purpose of the proxy application. Then -// attach a policy to the IAM user that limits federated users to only the actions -// and resources that they need to access. For more information, see IAM Best -// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using AWS account root user credentials have a maximum duration -// of 3,600 seconds (1 hour). -// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// AWS service except the following: -// -// * You cannot call any IAM operations using the AWS CLI or the AWS API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. 
For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This -// means that you cannot have separate Department and department tag keys. Assume -// that the user that you are federating has the Department=Marketing tag and -// you pass the department=engineering session tag. Department and department -// are not saved as separate tags, and the session tag passed in the request -// takes precedence over the user tag. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetFederationToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - return out, req.Send() -} - -// GetFederationTokenWithContext is the same as GetFederationToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetFederationToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
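A minimal sketch of GetFederationToken as described above, passing an inline session policy and a session tag. The policy JSON, user name, and bucket ARN are placeholders; the field names come from the GetFederationTokenInput type generated elsewhere in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Per the docs above, call this with an IAM user's long-term credentials.
	svc := sts.New(session.Must(session.NewSession()))

	// Without a session policy the federated session has no permissions at all,
	// so pass at least an inline policy scoping what the federated user may do.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}` // placeholder

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600), // 1 hour; 900s-129,600s allowed for IAM users
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")}, // session tag
		},
	})
	if err != nil {
		fmt.Println("GetFederationToken failed:", err)
		return
	}
	creds := out.Credentials
	fmt.Println("federated access key:", aws.StringValue(creds.AccessKeyId))
	fmt.Println("expires at:          ", aws.TimeValue(creds.Expiration))
}
```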
-func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSessionToken for more information on using the GetSessionToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { - op := &request.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output = &GetSessionTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionToken API operation for AWS Security Token Service. -// -// Returns a set of temporary credentials for an AWS account or IAM user. The -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. -// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA -// code that is associated with their MFA device. Using the temporary security -// credentials that are returned from the call, IAM users can then make programmatic -// calls to API operations that require MFA authentication. If you do not supply -// a correct MFA code, then the API returns an access denied error. For a comparison -// of GetSessionToken with the other API operations that produce temporary credentials, -// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// Session Duration -// -// The GetSessionToken operation must be called by using the long-term AWS security -// credentials of the AWS account root user or an IAM user. Credentials that -// are created by IAM users are valid for the duration that you specify. This -// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials -// based on account credentials can range from 900 seconds (15 minutes) up to -// 3,600 seconds (1 hour), with a default of 1 hour. 
-// -// Permissions -// -// The temporary security credentials created by GetSessionToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. -// -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with AWS account root user -// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with AWS. -// -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using AWS account root user credentials, the -// temporary credentials have root user permissions. Similarly, if GetSessionToken -// is called using the credentials of an IAM user, the temporary credentials -// have the same permissions as the IAM user. -// -// For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetSessionToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - return out, req.Send() -} - -// GetSessionTokenWithContext is the same as GetSessionToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetSessionToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type AssumeRoleInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. 
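A minimal sketch of the MFA-protected GetSessionToken flow described above. The MFA device ARN and token code are placeholders; the input fields are those of the GetSessionTokenInput type generated elsewhere in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Called with an IAM user's long-term credentials; the returned temporary
	// credentials then satisfy API calls whose policies require MFA.
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),                                          // 1 hour
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder MFA device
		TokenCode:       aws.String("123456"),                                     // current 6-digit MFA code
	})
	if err != nil {
		fmt.Println("GetSessionToken failed:", err)
		return
	}
	fmt.Println("session token:", aws.StringValue(out.Credentials.SessionToken))
}
```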
- // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A cross-account - // role is usually set up to trust everyone in an account. Therefore, the administrator - // of the trusting account might send an external ID to the administrator of - // the trusted account. That way, only someone with the ID can assume the role, - // rather than everyone in the account. For more information about the external - // ID, see How to Use an External ID When Granting Access to Your AWS Resources - // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. 
- // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the role to assume. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests that use the temporary security credentials will expose the role - // session name to the external account in their AWS CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. 
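A minimal sketch showing how the Policy and PolicyArns fields documented above are populated on an AssumeRole call. The role ARN, external ID, and inline policy are placeholders; AssumeRole itself is defined earlier in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// The effective permissions are the intersection of the role's identity-based
	// policy with the inline policy and managed policy ARNs passed here.
	inline := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"dynamodb:GetItem","Resource":"*"}]}` // placeholder

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("scoped-session"),
		ExternalId:      aws.String("example-external-id"), // only if the trust policy requires one
		Policy:          aws.String(inline),
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")}, // up to 10 managed policies
		},
	})
	if err != nil {
		fmt.Println("AssumeRole failed:", err)
		return
	}
	// PackedPolicySize reports how close the compressed policies/tags are to the limit.
	fmt.Println("packed policy size (%):", aws.Int64Value(out.PackedPolicySize))
}
```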
- // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // A list of session tags that you want to pass. Each session tag consists of - // a key name and an associated value. For more information about session tags, - // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same - // key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - // - // Additionally, if you used temporary credentials to perform this operation, - // the new session inherits any transitive session tags from the calling session. - // If you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) - // in the IAM User Guide. - Tags []*Tag `type:"list"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` - - // A list of keys for session tags that you want to set as transitive. If you - // set a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. 
For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. - // - // This parameter is optional. When you set session tags as transitive, the - // session policy and session tags packed binary limit is not affected. - // - // If you choose not to specify a transitive tag key, then no tags are passed - // from this session to any subsequent sessions. - TransitiveTagKeys []*string `type:"list"` -} - -// String returns the string representation -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { - s.DurationSeconds = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { - s.ExternalId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { - s.PolicyArns = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. 
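A minimal sketch of tagging a role session and marking the tag transitive, built with the generated fluent setters and client-side Validate shown above. The role ARN and tag values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Build the input with the fluent setters, tagging the session and marking
	// the tag transitive so it flows into any chained role sessions.
	input := (&sts.AssumeRoleInput{}).
		SetRoleArn("arn:aws:iam::123456789012:role/example-role"). // placeholder
		SetRoleSessionName("tagged-session").
		SetTags([]*sts.Tag{
			{Key: aws.String("Project"), Value: aws.String("example")},
		}).
		SetTransitiveTagKeys([]*string{aws.String("Project")})

	// Validate catches missing or too-short fields locally, before any network
	// call, mirroring the checks in the Validate method above.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}

	out, err := svc.AssumeRole(input)
	if err != nil {
		fmt.Println("AssumeRole failed:", err)
		return
	}
	fmt.Println("assumed role user:", aws.StringValue(out.AssumedRoleUser.Arn))
}
```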
-func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { - s.RoleSessionName = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { - s.SerialNumber = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { - s.Tags = v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { - s.TokenCode = &v - return s -} - -// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. -func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { - s.TransitiveTagKeys = v - return s -} - -// Contains the response to a successful AssumeRole request, including temporary -// AWS credentials that can be used to make AWS requests. -type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { - s.AssumedRoleUser = v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { - s.PackedPolicySize = &v - return s -} - -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. Your role session lasts for - // the duration that you specify for the DurationSeconds parameter, or until - // the time specified in the SAML authentication response's SessionNotOnOrAfter - // value, whichever is shorter. You can provide a DurationSeconds value from - // 900 seconds (15 minutes) up to the maximum session duration setting for the - // role. This setting can have a value from 1 hour to 12 hours. If you specify - // a value higher than this setting, the operation fails. 
For example, if you - // specify a session duration of 12 hours, but your administrator set the maximum - // session duration to 6 hours, your operation fails. To learn how to view the - // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. 
The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base-64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PrincipalArn == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.SAMLAssertion == nil { - invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. 
-func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { - s.PolicyArns = v - return s -} - -// SetPrincipalArn sets the PrincipalArn field's value. -func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { - s.PrincipalArn = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { - s.RoleArn = &v - return s -} - -// SetSAMLAssertion sets the SAMLAssertion field's value. -func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { - s.SAMLAssertion = &v - return s -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary AWS credentials that can be used to make AWS requests. -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. 
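A minimal sketch of AssumeRoleWithSAML using the input type above. The SAML provider ARN, role ARN, and response file path are placeholders, and reading the IdP response from a local file stands in for whatever channel delivers it in a real federation flow.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	raw, err := os.ReadFile("saml-response.xml") // placeholder path to the IdP response
	if err != nil {
		fmt.Println("read SAML response:", err)
		return
	}

	// The SAML response from the IdP, not IAM user credentials, authenticates this call.
	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"), // placeholder
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-role"),         // placeholder
		SAMLAssertion: aws.String(base64.StdEncoding.EncodeToString(raw)),                // must be base64 encoded
	})
	if err != nil {
		fmt.Println("AssumeRoleWithSAML failed:", err)
		return
	}
	fmt.Println("subject:", aws.StringValue(out.Subject))
	fmt.Println("key:    ", aws.StringValue(out.Credentials.AccessKeyId))
}
```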
- SubjectType *string `type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { - s.Credentials = v - return s -} - -// SetIssuer sets the Issuer field's value. -func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { - s.Issuer = &v - return s -} - -// SetNameQualifier sets the NameQualifier field's value. -func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { - s.NameQualifier = &v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { - s.PackedPolicySize = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { - s.Subject = &v - return s -} - -// SetSubjectType sets the SubjectType field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { - s.SubjectType = &v - return s -} - -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. 
You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The fully qualified host component of the domain name of the identity provider. - // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. 
- // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.WebIdentityToken == nil { - invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. 
-func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { - s.PolicyArns = v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { - s.ProviderId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { - s.RoleSessionName = &v - return s -} - -// SetWebIdentityToken sets the WebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { - s.WebIdentityToken = &v - return s -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary AWS credentials that can be used to make AWS requests. -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. 
- SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { - s.PackedPolicySize = &v - return s -} - -// SetProvider sets the Provider field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { - s.Provider = &v - return s -} - -// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { - s.SubjectFromWebIdentityToken = &v - return s -} - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by AWS when the - // role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { - s.Arn = &v - return s -} - -// SetAssumedRoleId sets the AssumedRoleId field's value. -func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { - s.AssumedRoleId = &v - return s -} - -// AWS credentials for API authentication. -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" required:"true"` - - // The secret access key that can be used to sign requests. 
- // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Credentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *Credentials) SetAccessKeyId(v string) *Credentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *Credentials) SetExpiration(v time.Time) *Credentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *Credentials) SetSecretAccessKey(v string) *Credentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *Credentials) SetSessionToken(v string) *Credentials { - s.SessionToken = &v - return s -} - -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. - // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - if s.EncodedMessage == nil { - invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncodedMessage sets the EncodedMessage field's value. -func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { - s.EncodedMessage = &v - return s -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an AWS -// request. -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - // An XML document that contains the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SetDecodedMessage sets the DecodedMessage field's value. -func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { - s.DecodedMessage = &v - return s -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. 
- // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FederatedUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *FederatedUser) SetArn(v string) *FederatedUser { - s.Arn = &v - return s -} - -// SetFederatedUserId sets the FederatedUserId field's value. -func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { - s.FederatedUserId = &v - return s -} - -type GetAccessKeyInfoInput struct { - _ struct{} `type:"structure"` - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercase letter or digit. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetAccessKeyInfoInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccessKeyInfoInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessKeyInfoInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} - if s.AccessKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) - } - if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { - invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { - s.AccessKeyId = &v - return s -} - -type GetAccessKeyInfoOutput struct { - _ struct{} `type:"structure"` - - // The number used to identify the AWS account. - Account *string `type:"string"` -} - -// String returns the string representation -func (s GetAccessKeyInfoOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccessKeyInfoOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { - s.Account = &v - return s -} - -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. 
-type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - // The AWS account ID number of the account that owns or contains the calling - // entity. - Account *string `type:"string"` - - // The AWS ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. The values returned are those listed - // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { - s.Account = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { - s.Arn = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { - s.UserId = &v - return s -} - -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using AWS account root user credentials are restricted to a maximum of 3,600 - // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using root user credentials defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. 
You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as a managed session policy. The policies must exist in the same account - // as the IAM user that is requesting federated access. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plain text that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. 
- // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // A list of session tags. Each session tag consists of a key name and an associated - // value. For more information about session tags, see Passing Session Tags - // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user - // tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetFederationTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { - s.DurationSeconds = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { - s.Name = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { - s.PolicyArns = v - return s -} - -// SetTags sets the Tags field's value. -func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { - s.Tags = v - return s -} - -// Contains the response to a successful GetFederationToken request, including -// temporary AWS credentials that can be used to make AWS requests. -type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. 
-func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { - s.Credentials = v - return s -} - -// SetFederatedUser sets the FederatedUser field's value. -func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { - s.FederatedUser = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { - s.PackedPolicySize = &v - return s -} - -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3,600 seconds (one - // hour). If the duration is longer than one hour, the session for AWS account - // owners defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the AWS Management Console - // and viewing the user's security credentials. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. 
-func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { - s.DurationSeconds = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful GetSessionToken request, including -// temporary AWS credentials that can be used to make AWS requests. -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { - s.Credentials = v - return s -} - -// A reference to the IAM managed policy that is passed as a session policy -// for a role session or a federated user session. -type PolicyDescriptorType struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - Arn *string `locationName:"arn" min:"20" type:"string"` -} - -// String returns the string representation -func (s PolicyDescriptorType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PolicyDescriptorType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PolicyDescriptorType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} - if s.Arn != nil && len(*s.Arn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArn sets the Arn field's value. -func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { - s.Arn = &v - return s -} - -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging AWS STS -// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. 
For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go deleted file mode 100644 index d5307fcaa0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ /dev/null @@ -1,11 +0,0 @@ -package sts - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = customizeRequest -} - -func customizeRequest(r *request.Request) { - r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go deleted file mode 100644 index fcb720dcac..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. 
For general information about the Query API, -// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// By default, AWS Security Token Service (STS) is available as a global service, -// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. -// Global requests map to the US East (N. Virginia) region. AWS recommends using -// Regional AWS STS endpoints instead of the global endpoint to reduce latency, -// build in redundancy, and increase session token validity. For more information, -// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Most AWS Regions are enabled for operations in all AWS services by default. -// Those Regions are automatically activated for use with AWS STS. Some Regions, -// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more -// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) -// in the AWS General Reference. When you enable these AWS Regions, they are -// automatically activated for use with AWS STS. You cannot activate the STS -// endpoint for a Region that is disabled. Tokens that are valid in all AWS -// Regions are longer than tokens that are valid in Regions that are enabled -// by default. Changing this setting might affect existing systems where you -// temporarily store tokens. For more information, see Managing Global Endpoint -// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) -// in the IAM User Guide. -// -// After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you provide both the Region -// and endpoint when you make calls to a Regional endpoint. You can provide -// the Region alone for manually enabled Regions, such as Asia Pacific (Hong -// Kong). In this case, the calls are directed to the STS Regional endpoint. -// However, if you provide the Region alone for Regions enabled by default, -// the calls are directed to the global endpoint of https://sts.amazonaws.com. -// -// To view the list of AWS STS endpoints and whether they are active by default, -// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) -// in the IAM User Guide. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. 
-// -// If you activate AWS STS endpoints in Regions other than the default global -// endpoint, then you must also turn on CloudTrail logging in those Regions. -// This is necessary to record any AWS STS API calls that are made in those -// Regions. For more information, see Turning On CloudTrail in Additional Regions -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) -// in the AWS CloudTrail User Guide. -// -// AWS Security Token Service (STS) is a global service with a single endpoint -// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls -// to a global service. However, because this endpoint is physically located -// in the US East (N. Virginia) Region, your logs list us-east-1 as the event -// Region. CloudTrail does not write these logs to the US East (Ohio) Region -// unless you choose to include global service logs in that Region. CloudTrail -// writes calls to all Regional endpoints to their respective Regions. For example, -// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) -// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU -// (Frankfurt) Region. -// -// To learn more about CloudTrail, including how to turn it on and find your -// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// Using the Client -// -// To contact AWS Security Token Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go deleted file mode 100644 index a233f542ef..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the identity provider (IDP) that - // was asked to verify the incoming identity token could not be reached. This - // is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. 
If the - // error persists, the identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by AWS. Get - // a new identity token from the identity provider and then retry the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the total packed size of the session policies - // and session tags combined was too large. An AWS conversion compresses the - // session policy document, session policy ARNs, and session tags into a packed - // binary format that has a separate limit. The error message indicates by percentage - // how close the policies and tags are to the upper size limit. For more information, - // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // You could receive this error even though you meet other defined session policy - // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". - // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. 
- ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go deleted file mode 100644 index d34a685533..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type STS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sts" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the STS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a STS client from just a session. -// svc := sts.New(mySession) -// -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { - svc := &STS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization. 
-func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go deleted file mode 100644 index e2e1d6efe5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package stsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" -) - -// STSAPI provides an interface to enable mocking the -// sts.STS service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } -// -// func main() { -// sess := session.New() -// svc := sts.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. 
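A minimal sketch of the stubbing pattern described above, assuming a test only needs canned GetCallerIdentity output; mockSTSClient and accountID are illustrative names:

package example

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sts"
    "github.com/aws/aws-sdk-go/service/sts/stsiface"
)

// mockSTSClient embeds the interface so any operation a test does not stub
// out fails loudly instead of silently succeeding.
type mockSTSClient struct {
    stsiface.STSAPI
}

// GetCallerIdentity returns a canned identity for tests.
func (m *mockSTSClient) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) {
    return &sts.GetCallerIdentityOutput{
        Account: aws.String("123456789012"),
        Arn:     aws.String("arn:aws:iam::123456789012:user/test"),
    }, nil
}

// accountID depends only on the interface, so it accepts either the real
// client or the mock above.
func accountID(api stsiface.STSAPI) (string, error) {
    out, err := api.GetCallerIdentity(&sts.GetCallerIdentityInput{})
    if err != nil {
        return "", err
    }
    return aws.StringValue(out.Account), nil
}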
-type STSAPI interface { - AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) - AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) - - AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) - - AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) - - DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) - - GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) - - GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) - - GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) - GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) - GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) - - GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) - GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) - GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) -} - -var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go index 7219316d66..a7c4573428 100644 --- a/vendor/github.com/evanphx/json-patch/v5/merge.go +++ b/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -48,7 +48,9 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { cur, ok := doc.obj[k] if !ok || cur == nil { - pruneNulls(v) + if !mergeMerge { + pruneNulls(v) + } _ = doc.set(k, v, &ApplyOptions{}) } else { _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{}) @@ -89,8 +91,8 @@ func pruneAryNulls(ary *partialArray) *partialArray { for _, v := range *ary { if v != nil { pruneNulls(v) - newAry = append(newAry, v) } + newAry = append(newAry, v) } *ary = newAry diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go 
b/vendor/github.com/evanphx/json-patch/v5/patch.go index fc860aca38..f24e9d59c7 100644 --- a/vendor/github.com/evanphx/json-patch/v5/patch.go +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -27,7 +27,7 @@ var ( startObject = json.Delim('{') endObject = json.Delim('}') startArray = json.Delim('[') - endArray = json.Delim(']') + endArray = json.Delim(']') ) var ( @@ -57,7 +57,7 @@ type Patch []Operation type partialDoc struct { keys []string - obj map[string]*lazyNode + obj map[string]*lazyNode } type partialArray []*lazyNode @@ -1026,6 +1026,10 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { // ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions. // It returns the new document indented. func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + var pd container if doc[0] == '[' { pd = &partialArray{} diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 7fae938e64..842ac1c095 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -36,3 +36,6 @@ linters: - funlen - gci - gocognit + - paralleltest + - thelper + - ifshort diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 0bb22df140..9a60409725 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -19,7 +19,9 @@ import ( "io/ioutil" "log" "net/http" + "net/url" "path/filepath" + "runtime" "strings" "time" ) @@ -53,10 +55,30 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( return remote } return func(pth string) ([]byte, error) { - upth, err := pathUnescape(strings.TrimPrefix(pth, `file://`)) + upth, err := pathUnescape(pth) if err != nil { return nil, err } + + if strings.HasPrefix(pth, `file://`) { + if runtime.GOOS == "windows" { + // support for canonical file URIs on windows. + // Zero tolerance here for dodgy URIs. + u, _ := url.Parse(upth) + if u.Host != "" { + // assume UNC name (volume share) + // file://host/share/folder\... ==> \\host\share\path\folder + // NOTE: UNC port not yet supported + upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) + } else { + // file:///c:/folder/... ==> just remove the leading slash + upth = strings.TrimPrefix(upth, `file:///`) + } + } else { + upth = strings.TrimPrefix(upth, `file://`) + } + } + return local(filepath.FromSlash(upth)) } } diff --git a/vendor/github.com/gobuffalo/flect/plural_rules.go b/vendor/github.com/gobuffalo/flect/plural_rules.go index 8cd3ba72e7..ff67928438 100644 --- a/vendor/github.com/gobuffalo/flect/plural_rules.go +++ b/vendor/github.com/gobuffalo/flect/plural_rules.go @@ -199,6 +199,7 @@ var singularToPluralSuffixList = []singularToPluralSuffix{ {"shoe", "shoes"}, {"stis", "stes"}, {"tive", "tives"}, + {"vice", "vices"}, {"wife", "wives"}, {"afe", "aves"}, {"bfe", "bves"}, diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go index c3c4a2b87e..ffde8a6508 100644 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -79,14 +79,9 @@ func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { } // Obtain the raw file descriptor. 
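The json-patch v5 changes above adjust null handling during merges and guard against empty documents in ApplyIndentWithOptions. A short sketch of the library's RFC 7386 merge-patch entry point, with illustrative document and patch values:

package main

import (
    "fmt"
    "log"

    jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
    doc := []byte(`{"name":"demo","labels":{"team":"a","tier":"web"}}`)
    // RFC 7386 merge patch: null removes a key, other values add or overwrite.
    patch := []byte(`{"labels":{"tier":null},"owner":"platform"}`)

    merged, err := jsonpatch.MergePatch(doc, patch)
    if err != nil {
        log.Fatal(err)
    }
    // labels.tier is removed and owner is added.
    fmt.Println(string(merged))
}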
- var raw []byte - switch fd := d.(type) { - case interface{ ProtoLegacyRawDesc() []byte }: - raw = fd.ProtoLegacyRawDesc() - case protoreflect.FileDescriptor: - raw, _ = proto.Marshal(protodesc.ToFileDescriptorProto(fd)) - } - file := protoimpl.X.CompressGZIP(raw) + fd := d.(protoreflect.FileDescriptor) + b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) + file := protoimpl.X.CompressGZIP(b) // Reverse the indexes, since we populated it in reverse. for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index 7c6c5a5244..60e82caa9a 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -135,14 +135,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error md := m.Descriptor() fds := md.Fields() - if string(in) == "null" && md.FullName() != "google.protobuf.Value" { - return nil - } - if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { return jsu.UnmarshalJSONPB(u, in) } + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + switch wellKnownType(md.FullName()) { case "Any": var jsonObject map[string]json.RawMessage @@ -332,11 +332,12 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error raw = v } + field := m.NewField(fd) // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd)) { + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { continue } - v, err := u.unmarshalValue(m.NewField(fd), raw, fd) + v, err := u.unmarshalValue(field, raw, fd) if err != nil { return err } @@ -364,11 +365,12 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) } + field := m.NewField(fd) // Unmarshal the field value. 
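The reordered null checks in the jsonpb decoder above matter mainly for google.protobuf.Value fields and for types implementing JSONPBUnmarshaler. A small sketch of decoding arbitrary JSON, including null, into a Value with this package; the input JSON is illustrative:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/golang/protobuf/jsonpb"
    structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
    // google.protobuf.Value accepts any JSON value, including null.
    var v structpb.Value
    u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
    if err := u.Unmarshal(strings.NewReader(`{"enabled": true, "note": null}`), &v); err != nil {
        log.Fatal(err)
    }
    // The null becomes a nested Value with its NullValue kind set.
    fmt.Println(v.GetStructValue().Fields["note"])
}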
- if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd)) { + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { continue } - v, err := u.unmarshalValue(m.NewField(fd), raw, fd) + v, err := u.unmarshalValue(field, raw, fd) if err != nil { return err } @@ -390,6 +392,14 @@ func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { return false } +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { switch { case fd.IsList(): diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff64205..066b4323b4 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff13..85f9f57365 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. 
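The deprecation note above points to the protoregistry API. A sketch of resolving a message type by name and instantiating it under the v2 API; newByName is an illustrative helper, and the blank durationpb import exists only so the example has a registered type to find:

package main

import (
    "fmt"
    "log"

    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"

    // Imported for side effects only: registers google.protobuf.Duration
    // with the global registry so the lookup below can succeed.
    _ "google.golang.org/protobuf/types/known/durationpb"
)

// newByName resolves a fully-qualified message name and returns a fresh instance.
func newByName(name string) (protoreflect.ProtoMessage, error) {
    mt, err := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(name))
    if err != nil {
        return nil, err // protoregistry.NotFound when the type is unknown
    }
    return mt.New().Interface(), nil
}

func main() {
    msg, err := newByName("google.protobuf.Duration")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("resolved %T\n", msg)
}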
func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c62..d3c33259d2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a41..b2b55dd851 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d4915..8368a3f70d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. 
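These deprecation notices all redirect callers from the ptypes helpers to methods on the generated well-known types. A short migration sketch using the durationpb and anypb packages from google.golang.org/protobuf; the values involved are illustrative:

package main

import (
    "fmt"
    "log"
    "time"

    "google.golang.org/protobuf/types/known/anypb"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    // Previously ptypes.DurationProto / ptypes.Duration.
    d := durationpb.New(90 * time.Second)
    if err := d.CheckValid(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(d.AsDuration())

    // Previously ptypes.MarshalAny / ptypes.Is / ptypes.UnmarshalAny.
    a, err := anypb.New(d)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(a.MessageIs(&durationpb.Duration{}))

    var out durationpb.Duration
    if err := a.UnmarshalTo(&out); err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.AsDuration())
}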
@@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/google/cel-go/LICENSE b/vendor/github.com/google/cel-go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/google/cel-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel deleted file mode 100644 index 0c740dd896..0000000000 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ /dev/null @@ -1,64 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "cel.go", - "env.go", - "io.go", - "library.go", - "options.go", - "program.go", - ], - deps = [ - "//checker:go_default_library", - "//checker/decls:go_default_library", - "//common:go_default_library", - "//common/containers:go_default_library", - "//common/types:go_default_library", - "//common/types/pb:go_default_library", - "//common/types/ref:go_default_library", - "//interpreter:go_default_library", - "//interpreter/functions:go_default_library", - "//parser:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//reflect/protodesc:go_default_library", - "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", - "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", - "@org_golang_google_protobuf//types/descriptorpb:go_default_library", - "@org_golang_google_protobuf//types/dynamicpb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], - importpath = "github.com/google/cel-go/cel", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = [ - "cel_test.go", - ], - embed = [ - ":go_default_library", - ], - data = [ - "//cel/testdata:gen_test_fds", - ], - deps = [ - "//checker/decls:go_default_library", - "//common/operators:go_default_library", - "//common/overloads:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", - "//interpreter/functions:go_default_library", - "//test/proto2pb:go_default_library", - "//test/proto3pb:go_default_library", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/cel/cel.go b/vendor/github.com/google/cel-go/cel/cel.go deleted file mode 100644 index eb5a9f4cc5..0000000000 --- a/vendor/github.com/google/cel-go/cel/cel.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cel defines the top-level interface for the Common Expression Language (CEL). -// -// CEL is a non-Turing complete expression language designed to parse, check, and evaluate -// expressions against user-defined environments. 
-package cel diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go deleted file mode 100644 index 0f97696fa4..0000000000 --- a/vendor/github.com/google/cel-go/cel/env.go +++ /dev/null @@ -1,466 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cel - -import ( - "errors" - "sync" - - "github.com/google/cel-go/checker" - "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common" - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/interpreter" - "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// Source interface representing a user-provided expression. -type Source interface { - common.Source -} - -// Ast representing the checked or unchecked expression, its source, and related metadata such as -// source position information. -type Ast struct { - expr *exprpb.Expr - info *exprpb.SourceInfo - source Source - refMap map[int64]*exprpb.Reference - typeMap map[int64]*exprpb.Type -} - -// Expr returns the proto serializable instance of the parsed/checked expression. -func (ast *Ast) Expr() *exprpb.Expr { - return ast.expr -} - -// IsChecked returns whether the Ast value has been successfully type-checked. -func (ast *Ast) IsChecked() bool { - return ast.refMap != nil && ast.typeMap != nil -} - -// SourceInfo returns character offset and newling position information about expression elements. -func (ast *Ast) SourceInfo() *exprpb.SourceInfo { - return ast.info -} - -// ResultType returns the output type of the expression if the Ast has been type-checked, else -// returns decls.Dyn as the parse step cannot infer the type. -func (ast *Ast) ResultType() *exprpb.Type { - if !ast.IsChecked() { - return decls.Dyn - } - return ast.typeMap[ast.expr.Id] -} - -// Source returns a view of the input used to create the Ast. This source may be complete or -// constructed from the SourceInfo. -func (ast *Ast) Source() Source { - return ast.source -} - -// FormatType converts a type message into a string representation. -func FormatType(t *exprpb.Type) string { - return checker.FormatCheckedType(t) -} - -// Env encapsulates the context necessary to perform parsing, type checking, or generation of -// evaluable programs for different expressions. -type Env struct { - Container *containers.Container - declarations []*exprpb.Decl - macros []parser.Macro - adapter ref.TypeAdapter - provider ref.TypeProvider - features map[int]bool - // program options tied to the environment. - progOpts []ProgramOption - - // Internal checker representation - chk *checker.Env - chkErr error - once sync.Once -} - -// NewEnv creates a program environment configured with the standard library of CEL functions and -// macros. 
The Env value returned can parse and check any CEL program which builds upon the core -// features documented in the CEL specification. -// -// See the EnvOption helper functions for the options that can be used to configure the -// environment. -func NewEnv(opts ...EnvOption) (*Env, error) { - stdOpts := append([]EnvOption{StdLib()}, opts...) - return NewCustomEnv(stdOpts...) -} - -// NewCustomEnv creates a custom program environment which is not automatically configured with the -// standard library of functions and macros documented in the CEL spec. -// -// The purpose for using a custom environment might be for subsetting the standard library produced -// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to -// limit the compute and memory impact of a CEL program by controlling the functions and macros -// that may appear in a given expression. -// -// See the EnvOption helper functions for the options that can be used to configure the -// environment. -func NewCustomEnv(opts ...EnvOption) (*Env, error) { - registry, err := types.NewRegistry() - if err != nil { - return nil, err - } - return (&Env{ - declarations: []*exprpb.Decl{}, - macros: []parser.Macro{}, - Container: containers.DefaultContainer, - adapter: registry, - provider: registry, - features: map[int]bool{}, - progOpts: []ProgramOption{}, - }).configure(opts) -} - -// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues. -// -// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil. -// Issues should be inspected if they are non-nil, but may not represent a fatal error. -// -// It is possible to have both non-nil Ast and Issues values returned from this call: however, -// the mere presence of an Ast does not imply that it is valid for use. -func (e *Env) Check(ast *Ast) (*Ast, *Issues) { - // Note, errors aren't currently possible on the Ast to ParsedExpr conversion. - pe, _ := AstToParsedExpr(ast) - - // Construct the internal checker env, erroring if there is an issue adding the declarations. - e.once.Do(func() { - ce := checker.NewEnv(e.Container, e.provider) - ce.EnableDynamicAggregateLiterals(true) - if e.HasFeature(FeatureDisableDynamicAggregateLiterals) { - ce.EnableDynamicAggregateLiterals(false) - } - err := ce.Add(e.declarations...) - if err != nil { - e.chkErr = err - } else { - e.chk = ce - } - }) - // The once call will ensure that this value is set or nil for all invocations. - if e.chkErr != nil { - errs := common.NewErrors(ast.Source()) - errs.ReportError(common.NoLocation, e.chkErr.Error()) - return nil, NewIssues(errs) - } - - res, errs := checker.Check(pe, ast.Source(), e.chk) - if len(errs.GetErrors()) > 0 { - return nil, NewIssues(errs) - } - // Manually create the Ast to ensure that the Ast source information (which may be more - // detailed than the information provided by Check), is returned to the caller. - return &Ast{ - source: ast.Source(), - expr: res.GetExpr(), - info: res.GetSourceInfo(), - refMap: res.GetReferenceMap(), - typeMap: res.GetTypeMap()}, nil -} - -// Compile combines the Parse and Check phases CEL program compilation to produce an Ast and -// associated issues. -// -// If an error is encountered during parsing the Compile step will not continue with the Check -// phase. If non-error issues are encountered during Parse, they may be combined with any issues -// discovered during Check. -// -// Note, for parse-only uses of CEL use Parse. 
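The Parse/Check/Program flow described above is how these cel-go APIs are usually exercised. A minimal sketch, assuming cel-go's cel and checker/decls packages at roughly the vendored version; the declared variable and expression are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/google/cel-go/cel"
    "github.com/google/cel-go/checker/decls"
)

func main() {
    env, err := cel.NewEnv(
        cel.Declarations(decls.NewVar("name", decls.String)),
    )
    if err != nil {
        log.Fatal(err)
    }

    // Compile = Parse + Check; a nil Issues.Err() means the Ast is usable.
    ast, iss := env.Compile(`"hello " + name`)
    if iss != nil && iss.Err() != nil {
        log.Fatal(iss.Err())
    }

    prg, err := env.Program(ast)
    if err != nil {
        log.Fatal(err)
    }

    out, _, err := prg.Eval(map[string]interface{}{"name": "world"})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.Value())
}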
-func (e *Env) Compile(txt string) (*Ast, *Issues) { - return e.CompileSource(common.NewTextSource(txt)) -} - -// CompileSource combines the Parse and Check phases CEL program compilation to produce an Ast and -// associated issues. -// -// If an error is encountered during parsing the CompileSource step will not continue with the -// Check phase. If non-error issues are encountered during Parse, they may be combined with any -// issues discovered during Check. -// -// Note, for parse-only uses of CEL use Parse. -func (e *Env) CompileSource(src common.Source) (*Ast, *Issues) { - ast, iss := e.ParseSource(src) - if iss.Err() != nil { - return nil, iss - } - checked, iss2 := e.Check(ast) - iss = iss.Append(iss2) - if iss.Err() != nil { - return nil, iss - } - return checked, iss -} - -// Extend the current environment with additional options to produce a new Env. -// -// Note, the extended Env value should not share memory with the original. It is possible, however, -// that a CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable. -// To ensure separation of state between extended environments either make sure the TypeAdapter and -// TypeProvider are immutable, or that their underlying implementations are based on the -// ref.TypeRegistry which provides a Copy method which will be invoked by this method. -func (e *Env) Extend(opts ...EnvOption) (*Env, error) { - if e.chkErr != nil { - return nil, e.chkErr - } - // Copy slices. - decsCopy := make([]*exprpb.Decl, len(e.declarations)) - macsCopy := make([]parser.Macro, len(e.macros)) - progOptsCopy := make([]ProgramOption, len(e.progOpts)) - copy(decsCopy, e.declarations) - copy(macsCopy, e.macros) - copy(progOptsCopy, e.progOpts) - - // Copy the adapter / provider if they appear to be mutable. - adapter := e.adapter - provider := e.provider - adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry) - providerReg, isProviderReg := e.provider.(ref.TypeRegistry) - // In most cases the provider and adapter will be a ref.TypeRegistry; - // however, in the rare cases where they are not, they are assumed to - // be immutable. Since it is possible to set the TypeProvider separately - // from the TypeAdapter, the possible configurations which could use a - // TypeRegistry as the base implementation are captured below. - if isAdapterReg && isProviderReg { - reg := providerReg.Copy() - provider = reg - // If the adapter and provider are the same object, set the adapter - // to the same ref.TypeRegistry as the provider. - if adapterReg == providerReg { - adapter = reg - } else { - // Otherwise, make a copy of the adapter. - adapter = adapterReg.Copy() - } - } else if isProviderReg { - provider = providerReg.Copy() - } else if isAdapterReg { - adapter = adapterReg.Copy() - } - - featuresCopy := make(map[int]bool, len(e.features)) - for k, v := range e.features { - featuresCopy[k] = v - } - - ext := &Env{ - Container: e.Container, - declarations: decsCopy, - macros: macsCopy, - progOpts: progOptsCopy, - adapter: adapter, - features: featuresCopy, - provider: provider, - } - return ext.configure(opts) -} - -// HasFeature checks whether the environment enables the given feature -// flag, as enumerated in options.go. -func (e *Env) HasFeature(flag int) bool { - _, has := e.features[flag] - return has -} - -// Parse parses the input expression value `txt` to a Ast and/or a set of Issues. -// -// This form of Parse creates a common.Source value for the input `txt` and forwards to the -// ParseSource method. 
-func (e *Env) Parse(txt string) (*Ast, *Issues) { - src := common.NewTextSource(txt) - return e.ParseSource(src) -} - -// ParseSource parses the input source to an Ast and/or set of Issues. -// -// Parsing has failed if the returned Issues value and its Issues.Err() value is non-nil. -// Issues should be inspected if they are non-nil, but may not represent a fatal error. -// -// It is possible to have both non-nil Ast and Issues values returned from this call; however, -// the mere presence of an Ast does not imply that it is valid for use. -func (e *Env) ParseSource(src common.Source) (*Ast, *Issues) { - res, errs := parser.ParseWithMacros(src, e.macros) - if len(errs.GetErrors()) > 0 { - return nil, &Issues{errs: errs} - } - // Manually create the Ast to ensure that the text source information is propagated on - // subsequent calls to Check. - return &Ast{ - source: Source(src), - expr: res.GetExpr(), - info: res.GetSourceInfo()}, nil -} - -// Program generates an evaluable instance of the Ast within the environment (Env). -func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { - optSet := e.progOpts - if len(opts) != 0 { - mergedOpts := []ProgramOption{} - mergedOpts = append(mergedOpts, e.progOpts...) - mergedOpts = append(mergedOpts, opts...) - optSet = mergedOpts - } - return newProgram(e, ast, optSet) -} - -// SetFeature sets the given feature flag, as enumerated in options.go. -func (e *Env) SetFeature(flag int) { - e.features[flag] = true -} - -// TypeAdapter returns the `ref.TypeAdapter` configured for the environment. -func (e *Env) TypeAdapter() ref.TypeAdapter { - return e.adapter -} - -// TypeProvider returns the `ref.TypeProvider` configured for the environment. -func (e *Env) TypeProvider() ref.TypeProvider { - return e.provider -} - -// UnknownVars returns an interpreter.PartialActivation which marks all variables -// declared in the Env as unknown AttributePattern values. -// -// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation -// unless the PartialAttributes option is provided as a ProgramOption. -func (e *Env) UnknownVars() interpreter.PartialActivation { - var unknownPatterns []*interpreter.AttributePattern - for _, d := range e.declarations { - switch d.GetDeclKind().(type) { - case *exprpb.Decl_Ident: - unknownPatterns = append(unknownPatterns, - interpreter.NewAttributePattern(d.GetName())) - } - } - part, _ := PartialVars( - interpreter.EmptyActivation(), - unknownPatterns...) - return part -} - -// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the -// attribute references which are unknown. -// -// Residual expressions are beneficial in a few scenarios: -// -// - Optimizing constant expression evaluations away. -// - Indexing and pruning expressions based on known input arguments. -// - Surfacing additional requirements that are needed in order to complete an evaluation. -// - Sharing the evaluation of an expression across multiple machines/nodes. -// -// For example, if an expression targets a 'resource' and 'request' attribute and the possible -// values for the resource are known, a PartialActivation could mark the 'request' as an unknown -// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts -// of the expression that reference the 'request'. -// -// Note, the expression ids within the residual AST generated through this method have no -// correlation to the expression ids of the original AST. 
-// -// See the PartialVars helper for how to construct a PartialActivation. -// -// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an -// Ast format and then Program again. -func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { - pruned := interpreter.PruneAst(a.Expr(), details.State()) - expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned})) - if err != nil { - return nil, err - } - parsed, iss := e.Parse(expr) - if iss != nil && iss.Err() != nil { - return nil, iss.Err() - } - if !a.IsChecked() { - return parsed, nil - } - checked, iss := e.Check(parsed) - if iss != nil && iss.Err() != nil { - return nil, iss.Err() - } - return checked, nil -} - -// configure applies a series of EnvOptions to the current environment. -func (e *Env) configure(opts []EnvOption) (*Env, error) { - // Customized the environment using the provided EnvOption values. If an error is - // generated at any step this, will be returned as a nil Env with a non-nil error. - var err error - for _, opt := range opts { - e, err = opt(e) - if err != nil { - return nil, err - } - } - return e, nil -} - -// Issues defines methods for inspecting the error details of parse and check calls. -// -// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. -type Issues struct { - errs *common.Errors -} - -// NewIssues returns an Issues struct from a common.Errors object. -func NewIssues(errs *common.Errors) *Issues { - return &Issues{ - errs: errs, - } -} - -// Err returns an error value if the issues list contains one or more errors. -func (i *Issues) Err() error { - if i == nil { - return nil - } - if len(i.Errors()) > 0 { - return errors.New(i.String()) - } - return nil -} - -// Errors returns the collection of errors encountered in more granular detail. -func (i *Issues) Errors() []common.Error { - if i == nil { - return []common.Error{} - } - return i.errs.GetErrors() -} - -// Append collects the issues from another Issues struct into a new Issues object. -func (i *Issues) Append(other *Issues) *Issues { - if i == nil { - return other - } - return NewIssues(i.errs.Append(other.errs.GetErrors())) -} - -// String converts the issues to a suitable display string. -func (i *Issues) String() string { - if i == nil { - return "" - } - return i.errs.ToDisplayString() -} diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go deleted file mode 100644 index b6cf58d927..0000000000 --- a/vendor/github.com/google/cel-go/cel/io.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cel - -import ( - "fmt" - - "github.com/google/cel-go/common" - "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// CheckedExprToAst converts a checked expression proto message to an Ast. 
-func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { - refMap := checkedExpr.GetReferenceMap() - if refMap == nil { - refMap = map[int64]*exprpb.Reference{} - } - typeMap := checkedExpr.GetTypeMap() - if typeMap == nil { - typeMap = map[int64]*exprpb.Type{} - } - return &Ast{ - expr: checkedExpr.GetExpr(), - info: checkedExpr.GetSourceInfo(), - source: common.NewInfoSource(checkedExpr.GetSourceInfo()), - refMap: refMap, - typeMap: typeMap, - } -} - -// AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. -// -// If the Ast.IsChecked() returns false, this conversion method will return an error. -func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { - if !a.IsChecked() { - return nil, fmt.Errorf("cannot convert unchecked ast") - } - return &exprpb.CheckedExpr{ - Expr: a.Expr(), - SourceInfo: a.SourceInfo(), - ReferenceMap: a.refMap, - TypeMap: a.typeMap, - }, nil -} - -// ParsedExprToAst converts a parsed expression proto message to an Ast. -func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast { - si := parsedExpr.GetSourceInfo() - if si == nil { - si = &exprpb.SourceInfo{} - } - return &Ast{ - expr: parsedExpr.GetExpr(), - info: si, - source: common.NewInfoSource(si), - } -} - -// AstToParsedExpr converts an Ast to an protobuf ParsedExpr value. -func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) { - return &exprpb.ParsedExpr{ - Expr: a.Expr(), - SourceInfo: a.SourceInfo(), - }, nil -} - -// AstToString converts an Ast back to a string if possible. -// -// Note, the conversion may not be an exact replica of the original expression, but will produce -// a string that is semantically equivalent and whose textual representation is stable. -func AstToString(a *Ast) (string, error) { - expr := a.Expr() - info := a.SourceInfo() - return parser.Unparse(expr, info) -} diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go deleted file mode 100644 index cef161d6a2..0000000000 --- a/vendor/github.com/google/cel-go/cel/library.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cel - -import ( - "github.com/google/cel-go/checker" - "github.com/google/cel-go/interpreter/functions" - "github.com/google/cel-go/parser" -) - -// Library provides a collection of EnvOption and ProgramOption values used to confiugre a CEL -// environment for a particular use case or with a related set of functionality. -// -// Note, the ProgramOption values provided by a library are expected to be static and not vary -// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to -// configure these options outside the Library and within the Env.Program() call directly. -type Library interface { - // CompileOptions returns a collection of funcitional options for configuring the Parse / Check - // environment. 
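The io.go helpers above are typically used to cache checked expressions in their proto form and rebuild programs later without re-parsing or re-checking. A sketch under that assumption; roundTrip is an illustrative name:

package example

import (
    "github.com/google/cel-go/cel"
)

// roundTrip compiles an expression, serializes the checked result to its proto
// form, and rebuilds a Program from the restored Ast, as a caching layer might.
func roundTrip(env *cel.Env, src string) (cel.Program, error) {
    ast, iss := env.Compile(src)
    if iss != nil && iss.Err() != nil {
        return nil, iss.Err()
    }

    // Only checked ASTs convert; the resulting proto could now be persisted.
    checkedExpr, err := cel.AstToCheckedExpr(ast)
    if err != nil {
        return nil, err
    }

    // Restore and plan a program without re-parsing or re-checking.
    restored := cel.CheckedExprToAst(checkedExpr)
    return env.Program(restored)
}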
- CompileOptions() []EnvOption - - // ProgramOptions returns a collection of functional options which should be included in every - // Program generated from the Env.Program() call. - ProgramOptions() []ProgramOption -} - -// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args, -// and to be linked to each other. -func Lib(l Library) EnvOption { - return func(e *Env) (*Env, error) { - var err error - for _, opt := range l.CompileOptions() { - e, err = opt(e) - if err != nil { - return nil, err - } - } - e.progOpts = append(e.progOpts, l.ProgramOptions()...) - return e, nil - } -} - -// StdLib returns an EnvOption for the standard library of CEL functions and macros. -func StdLib() EnvOption { - return Lib(stdLibrary{}) -} - -// stdLibrary implements the Library interface and provides functional options for the core CEL -// features documented in the specification. -type stdLibrary struct{} - -// EnvOptions returns options for the standard CEL function declarations and macros. -func (stdLibrary) CompileOptions() []EnvOption { - return []EnvOption{ - Declarations(checker.StandardDeclarations()...), - Macros(parser.AllMacros...), - } -} - -// ProgramOptions returns function implementations for the standard CEL functions. -func (stdLibrary) ProgramOptions() []ProgramOption { - return []ProgramOption{ - Functions(functions.StandardOverloads()...), - } -} diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go deleted file mode 100644 index 1da6ecd79a..0000000000 --- a/vendor/github.com/google/cel-go/cel/options.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cel - -import ( - "fmt" - - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types/pb" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/interpreter" - "github.com/google/cel-go/interpreter/functions" - "github.com/google/cel-go/parser" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - descpb "google.golang.org/protobuf/types/descriptorpb" -) - -// These constants beginning with "Feature" enable optional behavior in -// the library. See the documentation for each constant to see its -// effects, compatibility restrictions, and standard conformance. -const ( - _ = iota - - // Disallow heterogeneous aggregate (list, map) literals. - // Note, it is still possible to have heterogeneous aggregates when - // provided as variables to the expression, as well as via conversion - // of well-known dynamic types, or with unchecked expressions. - // Affects checking. Provides a subset of standard behavior. 
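The Library interface and the Lib/StdLib helpers above let related options be bundled and enabled together. A sketch of a tiny custom library; requestLib and its declaration are illustrative:

package example

import (
    "github.com/google/cel-go/cel"
    "github.com/google/cel-go/checker/decls"
)

// requestLib bundles the declarations a family of expressions relies on, so
// callers enable them with a single cel.Lib(...) option.
type requestLib struct{}

func (requestLib) CompileOptions() []cel.EnvOption {
    return []cel.EnvOption{
        cel.Declarations(
            decls.NewVar("request", decls.NewMapType(decls.String, decls.Dyn)),
        ),
    }
}

func (requestLib) ProgramOptions() []cel.ProgramOption {
    return nil
}

// newRequestEnv builds an environment with the standard library plus requestLib.
func newRequestEnv() (*cel.Env, error) {
    return cel.NewEnv(cel.Lib(requestLib{}))
}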
- FeatureDisableDynamicAggregateLiterals -) - -// EnvOption is a functional interface for configuring the environment. -type EnvOption func(e *Env) (*Env, error) - -// ClearMacros options clears all parser macros. -// -// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as -// comprehensions such as `all` and `exists` are enabled only via macros. -func ClearMacros() EnvOption { - return func(e *Env) (*Env, error) { - e.macros = parser.NoMacros - return e, nil - } -} - -// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one. -// -// Note: This option must be specified before the Types and TypeDescs options when used together. -func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption { - return func(e *Env) (*Env, error) { - e.adapter = adapter - return e, nil - } -} - -// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one. -// -// Note: This option must be specified before the Types and TypeDescs options when used together. -func CustomTypeProvider(provider ref.TypeProvider) EnvOption { - return func(e *Env) (*Env, error) { - e.provider = provider - return e, nil - } -} - -// Declarations option extends the declaration set configured in the environment. -// -// Note: Declarations will by default be appended to the pre-existing declaration set configured -// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a -// purely custom set of declarations use NewCustomEnv. -func Declarations(decls ...*exprpb.Decl) EnvOption { - // TODO: provide an alternative means of specifying declarations that doesn't refer - // to the underlying proto implementations. - return func(e *Env) (*Env, error) { - e.declarations = append(e.declarations, decls...) - return e, nil - } -} - -// Features sets the given feature flags. See list of Feature constants above. -func Features(flags ...int) EnvOption { - return func(e *Env) (*Env, error) { - for _, flag := range flags { - e.SetFeature(flag) - } - return e, nil - } -} - -// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree -// during type-checking. -// -// Note, it is still possible to have heterogeneous aggregates when provided as variables to the -// expression, as well as via conversion of well-known dynamic types, or with unchecked -// expressions. -func HomogeneousAggregateLiterals() EnvOption { - return Features(FeatureDisableDynamicAggregateLiterals) -} - -// Macros option extends the macro set configured in the environment. -// -// Note: This option must be specified after ClearMacros if used together. -func Macros(macros ...parser.Macro) EnvOption { - return func(e *Env) (*Env, error) { - e.macros = append(e.macros, macros...) - return e, nil - } -} - -// Container sets the container for resolving variable names. Defaults to an empty container. -// -// If all references within an expression are relative to a protocol buffer package, then -// specifying a container of `google.type` would make it possible to write expressions such as -// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`. -func Container(name string) EnvOption { - return func(e *Env) (*Env, error) { - cont, err := e.Container.Extend(containers.Name(name)) - if err != nil { - return nil, err - } - e.Container = cont - return e, nil - } -} - -// Abbrevs configures a set of simple names as abbreviations for fully-qualified names. 
-// -// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name. -// Abbreviations can be useful when working with variables, functions, and especially types from -// multiple namespaces: -// -// // CEL object construction -// qual.pkg.version.ObjTypeName{ -// field: alt.container.ver.FieldTypeName{value: ...} -// } -// -// Only one the qualified names above may be used as the CEL container, so at least one of these -// references must be a long qualified name within an otherwise short CEL program. Using the -// following abbreviations, the program becomes much simpler: -// -// // CEL Go option -// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName") -// // Simplified Object construction -// ObjTypeName{field: FieldTypeName{value: ...}} -// -// There are a few rules for the qualified names and the simple abbreviations generated from them: -// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`. -// - The last element in the qualified name is the abbreviation. -// - Abbreviations must not collide with each other. -// - The abbreviation must not collide with unqualified names in use. -// -// Abbreviations are distinct from container-based references in the following important ways: -// - Abbreviations must expand to a fully-qualified name. -// - Expanded abbreviations do not participate in namespace resolution. -// - Abbreviation expansion is done instead of the container search for a matching identifier. -// - Containers follow C++ namespace resolution rules with searches from the most qualified name -// to the least qualified name. -// - Container references within the CEL program may be relative, and are resolved to fully -// qualified names at either type-check time or program plan time, whichever comes first. -// -// If there is ever a case where an identifier could be in both the container and as an -// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is -// preserved between compilations even as the container evolves. -func Abbrevs(qualifiedNames ...string) EnvOption { - return func(e *Env) (*Env, error) { - cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...)) - if err != nil { - return nil, err - } - e.Container = cont - return e, nil - } -} - -// Types adds one or more type declarations to the environment, allowing for construction of -// type-literals whose definitions are included in the common expression built-in set. -// -// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type -// provided to this option will result in an error. -// -// Well-known protobuf types within the `google.protobuf.*` package are included in the standard -// environment by default. -// -// Note: This option must be specified after the CustomTypeProvider option when used together. 
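(Illustrative aside, not part of the vendored sources above.) A minimal, hedged sketch of how the EnvOption values documented in the removed library.go and options.go (Lib, Container, Abbrevs, Declarations) are typically composed into one cel.NewEnv call. The myLib type and the "greeting" variable are invented for illustration; only constructors documented in the removed files are used.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
)

// myLib is an illustrative Library implementation (see library.go above): it
// bundles one declaration as a compile option and contributes no program options.
type myLib struct{}

func (myLib) CompileOptions() []cel.EnvOption {
	return []cel.EnvOption{
		cel.Declarations(decls.NewVar("greeting", decls.String)),
	}
}

func (myLib) ProgramOptions() []cel.ProgramOption { return nil }

func main() {
	// Container scopes identifier resolution; Abbrevs maps a long qualified
	// name to its simple name, as described in the Abbrevs documentation above.
	env, err := cel.NewEnv(
		cel.Lib(myLib{}),
		cel.Container("google.type"),
		cel.Abbrevs("qual.pkg.version.ObjTypeName"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(env != nil)
}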
-func Types(addTypes ...interface{}) EnvOption { - return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(ref.TypeRegistry) - if !isReg { - return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) - } - for _, t := range addTypes { - switch v := t.(type) { - case proto.Message: - fdMap := pb.CollectFileDescriptorSet(v) - for _, fd := range fdMap { - err := reg.RegisterDescriptor(fd) - if err != nil { - return nil, err - } - } - case ref.Type: - err := reg.RegisterType(v) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unsupported type: %T", t) - } - } - return e, nil - } -} - -// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files, -// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided. -// -// Note that messages instantiated from these descriptors will be *dynamicpb.Message values -// rather than the concrete message type. -// -// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via -// extension or by re-using the same EnvOption with another NewEnv() call. -func TypeDescs(descs ...interface{}) EnvOption { - return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(ref.TypeRegistry) - if !isReg { - return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) - } - // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a - // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other - // and will not resolve properly unless they are part of the same set. - var fds *descpb.FileDescriptorSet - for _, d := range descs { - switch f := d.(type) { - case *descpb.FileDescriptorProto: - if fds == nil { - fds = &descpb.FileDescriptorSet{ - File: []*descpb.FileDescriptorProto{}, - } - } - fds.File = append(fds.File, f) - } - } - if fds != nil { - if err := registerFileSet(reg, fds); err != nil { - return nil, err - } - } - for _, d := range descs { - switch f := d.(type) { - case *protoregistry.Files: - if err := registerFiles(reg, f); err != nil { - return nil, err - } - case protoreflect.FileDescriptor: - if err := reg.RegisterDescriptor(f); err != nil { - return nil, err - } - case *descpb.FileDescriptorSet: - if err := registerFileSet(reg, f); err != nil { - return nil, err - } - case *descpb.FileDescriptorProto: - // skip, handled as a synthetic file descriptor set. - default: - return nil, fmt.Errorf("unsupported type descriptor: %T", d) - } - } - return e, nil - } -} - -func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error { - files, err := protodesc.NewFiles(fileSet) - if err != nil { - return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) - } - return registerFiles(reg, files) -} - -func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error { - var err error - files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { - err = reg.RegisterDescriptor(fd) - return err == nil - }) - return err -} - -// ProgramOption is a functional interface for configuring evaluation bindings and behaviors. -type ProgramOption func(p *prog) (*prog, error) - -// CustomDecorator appends an InterpreterDecorator to the program. -// -// InterpretableDecorators can be used to inspect, alter, or replace the Program plan. 
-func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption { - return func(p *prog) (*prog, error) { - p.decorators = append(p.decorators, dec) - return p, nil - } -} - -// Functions adds function overloads that extend or override the set of CEL built-ins. -func Functions(funcs ...*functions.Overload) ProgramOption { - return func(p *prog) (*prog, error) { - if err := p.dispatcher.Add(funcs...); err != nil { - return nil, err - } - return p, nil - } -} - -// Globals sets the global variable values for a given program. These values may be shadowed by -// variables with the same name provided to the Eval() call. -// -// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`. -func Globals(vars interface{}) ProgramOption { - return func(p *prog) (*prog, error) { - defaultVars, err := - interpreter.NewActivation(vars) - if err != nil { - return nil, err - } - p.defaultVars = defaultVars - return p, nil - } -} - -// EvalOption indicates an evaluation option that may affect the evaluation behavior or information -// in the output result. -type EvalOption int - -const ( - // OptTrackState will cause the runtime to return an immutable EvalState value in the Result. - OptTrackState EvalOption = 1 << iota - - // OptExhaustiveEval causes the runtime to disable short-circuits and track state. - OptExhaustiveEval EvalOption = 1< 0 { - // Instantiate overload's type with fresh type variables. - substitutions := newMapping() - for _, typePar := range overload.TypeParams { - substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar()) - } - overloadType = substitute(substitutions, overloadType, false) - } - - candidateArgTypes := overloadType.GetFunction().ArgTypes - if c.isAssignableList(argTypes, candidateArgTypes) { - if checkedRef == nil { - checkedRef = newFunctionReference(overload.OverloadId) - } else { - checkedRef.OverloadId = append(checkedRef.OverloadId, overload.OverloadId) - } - - // First matching overload, determines result type. - fnResultType := substitute(c.mappings, - overloadType.GetFunction().ResultType, - false) - if resultType == nil { - resultType = fnResultType - } else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) { - resultType = decls.Dyn - } - } - } - - if resultType == nil { - c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil) - resultType = decls.Error - return nil - } - - return newResolution(checkedRef, resultType) -} - -func (c *checker) checkCreateList(e *exprpb.Expr) { - create := e.GetListExpr() - var elemType *exprpb.Type - for _, e := range create.Elements { - c.check(e) - elemType = c.joinTypes(c.location(e), elemType, c.getType(e)) - } - if elemType == nil { - // If the list is empty, assign free type var to elem type. - elemType = c.newTypeVar() - } - c.setType(e, decls.NewListType(elemType)) -} - -func (c *checker) checkCreateStruct(e *exprpb.Expr) { - str := e.GetStructExpr() - if str.MessageName != "" { - c.checkCreateMessage(e) - } else { - c.checkCreateMap(e) - } -} - -func (c *checker) checkCreateMap(e *exprpb.Expr) { - mapVal := e.GetStructExpr() - var keyType *exprpb.Type - var valueType *exprpb.Type - for _, ent := range mapVal.GetEntries() { - key := ent.GetMapKey() - c.check(key) - keyType = c.joinTypes(c.location(key), keyType, c.getType(key)) - - c.check(ent.Value) - valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value)) - } - if keyType == nil { - // If the map is empty, assign free type variables to typeKey and value type. 
- keyType = c.newTypeVar() - valueType = c.newTypeVar() - } - c.setType(e, decls.NewMapType(keyType, valueType)) -} - -func (c *checker) checkCreateMessage(e *exprpb.Expr) { - msgVal := e.GetStructExpr() - // Determine the type of the message. - messageType := decls.Error - decl := c.env.LookupIdent(msgVal.MessageName) - if decl == nil { - c.errors.undeclaredReference( - c.location(e), c.env.container.Name(), msgVal.MessageName) - return - } - // Ensure the type name is fully qualified in the AST. - msgVal.MessageName = decl.GetName() - c.setReference(e, newIdentReference(decl.GetName(), nil)) - ident := decl.GetIdent() - identKind := kindOf(ident.Type) - if identKind != kindError { - if identKind != kindType { - c.errors.notAType(c.location(e), ident.Type) - } else { - messageType = ident.Type.GetType() - if kindOf(messageType) != kindObject { - c.errors.notAMessageType(c.location(e), messageType) - messageType = decls.Error - } - } - } - if isObjectWellKnownType(messageType) { - c.setType(e, getObjectWellKnownType(messageType)) - } else { - c.setType(e, messageType) - } - - // Check the field initializers. - for _, ent := range msgVal.GetEntries() { - field := ent.GetFieldKey() - value := ent.Value - c.check(value) - - fieldType := decls.Error - if t, found := c.lookupFieldType( - c.locationByID(ent.Id), - messageType.GetMessageType(), - field); found { - fieldType = t.Type - } - if !c.isAssignable(fieldType, c.getType(value)) { - c.errors.fieldTypeMismatch( - c.locationByID(ent.Id), field, fieldType, c.getType(value)) - } - } -} - -func (c *checker) checkComprehension(e *exprpb.Expr) { - comp := e.GetComprehensionExpr() - c.check(comp.IterRange) - c.check(comp.AccuInit) - accuType := c.getType(comp.AccuInit) - rangeType := c.getType(comp.IterRange) - var varType *exprpb.Type - - switch kindOf(rangeType) { - case kindList: - varType = rangeType.GetListType().ElemType - case kindMap: - // Ranges over the keys. - varType = rangeType.GetMapType().KeyType - case kindDyn, kindError, kindTypeParam: - // Set the range type to DYN to prevent assignment to a potentionally incorrect type - // at a later point in type-checking. The isAssignable call will update the type - // substitutions for the type param under the covers. - c.isAssignable(decls.Dyn, rangeType) - // Set the range iteration variable to type DYN as well. - varType = decls.Dyn - default: - c.errors.notAComprehensionRange(c.location(comp.IterRange), rangeType) - varType = decls.Error - } - - // Create a scope for the comprehension since it has a local accumulation variable. - // This scope will contain the accumulation variable used to compute the result. - c.env = c.env.enterScope() - c.env.Add(decls.NewVar(comp.AccuVar, accuType)) - // Create a block scope for the loop. - c.env = c.env.enterScope() - c.env.Add(decls.NewVar(comp.IterVar, varType)) - // Check the variable references in the condition and step. - c.check(comp.LoopCondition) - c.assertType(comp.LoopCondition, decls.Bool) - c.check(comp.LoopStep) - c.assertType(comp.LoopStep, accuType) - // Exit the loop's block scope before checking the result. - c.env = c.env.exitScope() - c.check(comp.Result) - // Exit the comprehension scope. - c.env = c.env.exitScope() - c.setType(e, c.getType(comp.Result)) -} - -// Checks compatibility of joined types, and returns the most general common type. 
-func (c *checker) joinTypes(loc common.Location, - previous *exprpb.Type, - current *exprpb.Type) *exprpb.Type { - if previous == nil { - return current - } - if c.isAssignable(previous, current) { - return mostGeneral(previous, current) - } - if c.dynAggregateLiteralElementTypesEnabled() { - return decls.Dyn - } - c.errors.typeMismatch(loc, previous, current) - return decls.Error -} - -func (c *checker) dynAggregateLiteralElementTypesEnabled() bool { - return c.env.aggLitElemType == dynElementType -} - -func (c *checker) newTypeVar() *exprpb.Type { - id := c.freeTypeVarCounter - c.freeTypeVarCounter++ - return decls.NewTypeParamType(fmt.Sprintf("_var%d", id)) -} - -func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool { - subs := isAssignable(c.mappings, t1, t2) - if subs != nil { - c.mappings = subs - return true - } - - return false -} - -func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool { - subs := isAssignableList(c.mappings, l1, l2) - if subs != nil { - c.mappings = subs - return true - } - - return false -} - -func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) { - if _, found := c.env.provider.FindType(messageType); !found { - // This should not happen, anyway, report an error. - c.errors.unexpectedFailedResolution(l, messageType) - return nil, false - } - - if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found { - return ft, found - } - - c.errors.undefinedField(l, fieldName) - return nil, false -} - -func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) { - if old, found := c.types[e.Id]; found && !proto.Equal(old, t) { - panic(fmt.Sprintf("(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.Id, old, t)) - } - c.types[e.Id] = t -} - -func (c *checker) getType(e *exprpb.Expr) *exprpb.Type { - return c.types[e.Id] -} - -func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) { - if old, found := c.references[e.Id]; found && !proto.Equal(old, r) { - panic(fmt.Sprintf("Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.Id, old, r)) - } - c.references[e.Id] = r -} - -func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) { - if !c.isAssignable(t, c.getType(e)) { - c.errors.typeMismatch(c.location(e), t, c.getType(e)) - } -} - -type overloadResolution struct { - Reference *exprpb.Reference - Type *exprpb.Type -} - -func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution { - return &overloadResolution{ - Reference: checkedRef, - Type: t, - } -} - -func (c *checker) location(e *exprpb.Expr) common.Location { - return c.locationByID(e.Id) -} - -func (c *checker) locationByID(id int64) common.Location { - positions := c.sourceInfo.GetPositions() - var line = 1 - if offset, found := positions[id]; found { - col := int(offset) - for _, lineOffset := range c.sourceInfo.LineOffsets { - if lineOffset < offset { - line++ - col = int(offset - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) - } - return common.NoLocation -} - -func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference { - return &exprpb.Reference{Name: name, Value: value} -} - -func newFunctionReference(overloads ...string) *exprpb.Reference { - return &exprpb.Reference{OverloadId: overloads} -} diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel deleted file mode 100644 index 
a904d09d81..0000000000 --- a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "decls.go", - "scopes.go", - ], - deps = [ - "@org_golang_google_protobuf//types/known/emptypb:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], - importpath = "github.com/google/cel-go/checker/decls", -) diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go deleted file mode 100644 index 88a99282d9..0000000000 --- a/vendor/github.com/google/cel-go/checker/decls/decls.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package decls provides helpers for creating variable and function declarations. -package decls - -import ( - emptypb "google.golang.org/protobuf/types/known/emptypb" - structpb "google.golang.org/protobuf/types/known/structpb" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -var ( - // Error type used to communicate issues during type-checking. - Error = &exprpb.Type{ - TypeKind: &exprpb.Type_Error{ - Error: &emptypb.Empty{}}} - - // Dyn is a top-type used to represent any value. - Dyn = &exprpb.Type{ - TypeKind: &exprpb.Type_Dyn{ - Dyn: &emptypb.Empty{}}} -) - -// Commonly used types. -var ( - Bool = NewPrimitiveType(exprpb.Type_BOOL) - Bytes = NewPrimitiveType(exprpb.Type_BYTES) - Double = NewPrimitiveType(exprpb.Type_DOUBLE) - Int = NewPrimitiveType(exprpb.Type_INT64) - Null = &exprpb.Type{ - TypeKind: &exprpb.Type_Null{ - Null: structpb.NullValue_NULL_VALUE}} - String = NewPrimitiveType(exprpb.Type_STRING) - Uint = NewPrimitiveType(exprpb.Type_UINT64) -) - -// Well-known types. -// TODO: Replace with an abstract type registry. -var ( - Any = NewWellKnownType(exprpb.Type_ANY) - Duration = NewWellKnownType(exprpb.Type_DURATION) - Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP) -) - -// NewAbstractType creates an abstract type declaration which references a proto -// message name and may also include type parameters. -func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_AbstractType_{ - AbstractType: &exprpb.Type_AbstractType{ - Name: name, - ParameterTypes: paramTypes}}} -} - -// NewFunctionType creates a function invocation contract, typically only used -// by type-checking steps after overload resolution. 
-func NewFunctionType(resultType *exprpb.Type, - argTypes ...*exprpb.Type) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_Function{ - Function: &exprpb.Type_FunctionType{ - ResultType: resultType, - ArgTypes: argTypes}}} -} - -// NewFunction creates a named function declaration with one or more overloads. -func NewFunction(name string, - overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl { - return &exprpb.Decl{ - Name: name, - DeclKind: &exprpb.Decl_Function{ - Function: &exprpb.Decl_FunctionDecl{ - Overloads: overloads}}} -} - -// NewIdent creates a named identifier declaration with an optional literal -// value. -// -// Literal values are typically only associated with enum identifiers. -// -// Deprecated: Use NewVar or NewConst instead. -func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl { - return &exprpb.Decl{ - Name: name, - DeclKind: &exprpb.Decl_Ident{ - Ident: &exprpb.Decl_IdentDecl{ - Type: t, - Value: v}}} -} - -// NewConst creates a constant identifier with a CEL constant literal value. -func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl { - return NewIdent(name, t, v) -} - -// NewVar creates a variable identifier. -func NewVar(name string, t *exprpb.Type) *exprpb.Decl { - return NewIdent(name, t, nil) -} - -// NewInstanceOverload creates a instance function overload contract. -// First element of argTypes is instance. -func NewInstanceOverload(id string, argTypes []*exprpb.Type, - resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload { - return &exprpb.Decl_FunctionDecl_Overload{ - OverloadId: id, - ResultType: resultType, - Params: argTypes, - IsInstanceFunction: true} -} - -// NewListType generates a new list with elements of a certain type. -func NewListType(elem *exprpb.Type) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_ListType_{ - ListType: &exprpb.Type_ListType{ - ElemType: elem}}} -} - -// NewMapType generates a new map with typed keys and values. -func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_MapType_{ - MapType: &exprpb.Type_MapType{ - KeyType: key, - ValueType: value}}} -} - -// NewObjectType creates an object type for a qualified type name. -func NewObjectType(typeName string) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_MessageType{ - MessageType: typeName}} -} - -// NewOverload creates a function overload declaration which contains a unique -// overload id as well as the expected argument and result types. Overloads -// must be aggregated within a Function declaration. -func NewOverload(id string, argTypes []*exprpb.Type, - resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload { - return &exprpb.Decl_FunctionDecl_Overload{ - OverloadId: id, - ResultType: resultType, - Params: argTypes, - IsInstanceFunction: false} -} - -// NewParameterizedInstanceOverload creates a parametric function instance overload type. -func NewParameterizedInstanceOverload(id string, - argTypes []*exprpb.Type, - resultType *exprpb.Type, - typeParams []string) *exprpb.Decl_FunctionDecl_Overload { - return &exprpb.Decl_FunctionDecl_Overload{ - OverloadId: id, - ResultType: resultType, - Params: argTypes, - TypeParams: typeParams, - IsInstanceFunction: true} -} - -// NewParameterizedOverload creates a parametric function overload type. 
-func NewParameterizedOverload(id string, - argTypes []*exprpb.Type, - resultType *exprpb.Type, - typeParams []string) *exprpb.Decl_FunctionDecl_Overload { - return &exprpb.Decl_FunctionDecl_Overload{ - OverloadId: id, - ResultType: resultType, - Params: argTypes, - TypeParams: typeParams, - IsInstanceFunction: false} -} - -// NewPrimitiveType creates a type for a primitive value. See the var declarations -// for Int, Uint, etc. -func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_Primitive{ - Primitive: primitive}} -} - -// NewTypeType creates a new type designating a type. -func NewTypeType(nested *exprpb.Type) *exprpb.Type { - if nested == nil { - // must set the nested field for a valid oneof option - nested = &exprpb.Type{} - } - return &exprpb.Type{ - TypeKind: &exprpb.Type_Type{ - Type: nested}} -} - -// NewTypeParamType creates a type corresponding to a named, contextual parameter. -func NewTypeParamType(name string) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_TypeParam{ - TypeParam: name}} -} - -// NewWellKnownType creates a type corresponding to a protobuf well-known type -// value. -func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_WellKnown{ - WellKnown: wellKnown}} -} - -// NewWrapperType creates a wrapped primitive type instance. Wrapped types -// are roughly equivalent to a nullable, or optionally valued type. -func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type { - primitive := wrapped.GetPrimitive() - if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED { - // TODO: return an error - panic("Wrapped type must be a primitive") - } - return &exprpb.Type{ - TypeKind: &exprpb.Type_Wrapper{ - Wrapper: primitive}} -} diff --git a/vendor/github.com/google/cel-go/checker/decls/scopes.go b/vendor/github.com/google/cel-go/checker/decls/scopes.go deleted file mode 100644 index 1064c73b1f..0000000000 --- a/vendor/github.com/google/cel-go/checker/decls/scopes.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package decls - -import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - -// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all -// identifiers in scope and an optional parent representing outer scopes. -// Each Groups value is a mapping of names to Decls in the ident and function namespaces. -// Lookups are performed such that bindings in inner scopes shadow those in outer scopes. -type Scopes struct { - parent *Scopes - scopes *Group -} - -// NewScopes creates a new, empty Scopes. -// Some operations can't be safely performed until a Group is added with Push. -func NewScopes() *Scopes { - return &Scopes{ - scopes: newGroup(), - } -} - -// Push creates a new Scopes value which references the current Scope as its parent. 
-func (s *Scopes) Push() *Scopes { - return &Scopes{ - parent: s, - scopes: newGroup(), - } -} - -// Pop returns the parent Scopes value for the current scope, or the current scope if the parent -// is nil. -func (s *Scopes) Pop() *Scopes { - if s.parent != nil { - return s.parent - } - // TODO: Consider whether this should be an error / panic. - return s -} - -// AddIdent adds the ident Decl in the current scope. -// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten. -func (s *Scopes) AddIdent(decl *exprpb.Decl) { - s.scopes.idents[decl.Name] = decl -} - -// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be -// found. -// Note: The search is performed from innermost to outermost. -func (s *Scopes) FindIdent(name string) *exprpb.Decl { - if ident, found := s.scopes.idents[name]; found { - return ident - } - if s.parent != nil { - return s.parent.FindIdent(name) - } - return nil -} - -// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or -// nil if one does not exist. -// Note: The search is only performed on the current scope and does not search outer scopes. -func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl { - if ident, found := s.scopes.idents[name]; found { - return ident - } - return nil -} - -// AddFunction adds the function Decl to the current scope. -// Note: Any previous entry for a function in the current scope with the same name is overwritten. -func (s *Scopes) AddFunction(fn *exprpb.Decl) { - s.scopes.functions[fn.Name] = fn -} - -// FindFunction finds the first function Decl with a matching name in Scopes. -// The search is performed from innermost to outermost. -// Returns nil if no such function in Scopes. -func (s *Scopes) FindFunction(name string) *exprpb.Decl { - if fn, found := s.scopes.functions[name]; found { - return fn - } - if s.parent != nil { - return s.parent.FindFunction(name) - } - return nil -} - -// Group is a set of Decls that is pushed on or popped off a Scopes as a unit. -// Contains separate namespaces for idenifier and function Decls. -// (Should be named "Scope" perhaps?) -type Group struct { - idents map[string]*exprpb.Decl - functions map[string]*exprpb.Decl -} - -func newGroup() *Group { - return &Group{ - idents: make(map[string]*exprpb.Decl), - functions: make(map[string]*exprpb.Decl), - } -} diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go deleted file mode 100644 index 93cb106126..0000000000 --- a/vendor/github.com/google/cel-go/checker/env.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package checker - -import ( - "fmt" - "strings" - - "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/pb" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -type aggregateLiteralElementType int - -const ( - dynElementType aggregateLiteralElementType = iota - homogenousElementType aggregateLiteralElementType = 1 << iota -) - -// Env is the environment for type checking. -// -// The Env is comprised of a container, type provider, declarations, and other related objects -// which can be used to assist with type-checking. -type Env struct { - container *containers.Container - provider ref.TypeProvider - declarations *decls.Scopes - aggLitElemType aggregateLiteralElementType -} - -// NewEnv returns a new *Env with the given parameters. -func NewEnv(container *containers.Container, provider ref.TypeProvider) *Env { - declarations := decls.NewScopes() - declarations.Push() - - return &Env{ - container: container, - provider: provider, - declarations: declarations, - } -} - -// NewStandardEnv returns a new *Env with the given params plus standard declarations. -func NewStandardEnv(container *containers.Container, provider ref.TypeProvider) *Env { - e := NewEnv(container, provider) - if err := e.Add(StandardDeclarations()...); err != nil { - // The standard declaration set should never have duplicate declarations. - panic(err) - } - // TODO: isolate standard declarations from the custom set which may be provided layer. - return e -} - -// EnableDynamicAggregateLiterals detmerines whether list and map literals may support mixed -// element types at check-time. This does not preclude the presence of a dynamic list or map -// somewhere in the CEL evaluation process. -func (e *Env) EnableDynamicAggregateLiterals(enabled bool) *Env { - e.aggLitElemType = dynElementType - if !enabled { - e.aggLitElemType = homogenousElementType - } - return e -} - -// Add adds new Decl protos to the Env. -// Returns an error for identifier redeclarations. -func (e *Env) Add(decls ...*exprpb.Decl) error { - errMsgs := make([]errorMsg, 0) - for _, decl := range decls { - switch decl.DeclKind.(type) { - case *exprpb.Decl_Ident: - errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl))) - case *exprpb.Decl_Function: - errMsgs = append(errMsgs, e.addFunction(sanitizeFunction(decl))...) - } - } - return formatError(errMsgs) -} - -// LookupIdent returns a Decl proto for typeName as an identifier in the Env. -// Returns nil if no such identifier is found in the Env. -func (e *Env) LookupIdent(name string) *exprpb.Decl { - for _, candidate := range e.container.ResolveCandidateNames(name) { - if ident := e.declarations.FindIdent(candidate); ident != nil { - return ident - } - - // Next try to import the name as a reference to a message type. If found, - // the declaration is added to the outest (global) scope of the - // environment, so next time we can access it faster. - if t, found := e.provider.FindType(candidate); found { - decl := decls.NewVar(candidate, t) - e.declarations.AddIdent(decl) - return decl - } - - // Next try to import this as an enum value by splitting the name in a type prefix and - // the enum inside. 
- if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { - decl := decls.NewIdent(candidate, - decls.Int, - &exprpb.Constant{ - ConstantKind: &exprpb.Constant_Int64Value{ - Int64Value: int64(enumValue.(types.Int))}}) - e.declarations.AddIdent(decl) - return decl - } - } - return nil -} - -// LookupFunction returns a Decl proto for typeName as a function in env. -// Returns nil if no such function is found in env. -func (e *Env) LookupFunction(name string) *exprpb.Decl { - for _, candidate := range e.container.ResolveCandidateNames(name) { - if fn := e.declarations.FindFunction(candidate); fn != nil { - return fn - } - } - return nil -} - -// addOverload adds overload to function declaration f. -// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro. -func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg { - errMsgs := make([]errorMsg, 0) - function := f.GetFunction() - emptyMappings := newMapping() - overloadFunction := decls.NewFunctionType(overload.GetResultType(), - overload.GetParams()...) - overloadErased := substitute(emptyMappings, overloadFunction, true) - for _, existing := range function.GetOverloads() { - existingFunction := decls.NewFunctionType(existing.GetResultType(), - existing.GetParams()...) - existingErased := substitute(emptyMappings, existingFunction, true) - overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil || - isAssignable(emptyMappings, existingErased, overloadErased) != nil - if overlap && - overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() { - errMsgs = append(errMsgs, - overlappingOverloadError(f.Name, - overload.GetOverloadId(), overloadFunction, - existing.GetOverloadId(), existingFunction)) - } - } - - for _, macro := range parser.AllMacros { - if macro.Function() == f.Name && - macro.IsReceiverStyle() == overload.GetIsInstanceFunction() && - macro.ArgCount() == len(overload.GetParams()) { - errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount())) - } - } - if len(errMsgs) > 0 { - return errMsgs - } - function.Overloads = append(function.GetOverloads(), overload) - return errMsgs -} - -// addFunction adds the function Decl to the Env. -// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl. -// If overload overlaps with an existing overload, adds to the errors in the Env instead. -func (e *Env) addFunction(decl *exprpb.Decl) []errorMsg { - current := e.declarations.FindFunction(decl.Name) - if current == nil { - //Add the function declaration without overloads and check the overloads below. - current = decls.NewFunction(decl.Name) - e.declarations.AddFunction(current) - } - - errorMsgs := make([]errorMsg, 0) - for _, overload := range decl.GetFunction().GetOverloads() { - errorMsgs = append(errorMsgs, e.addOverload(current, overload)...) - } - return errorMsgs -} - -// addIdent adds the Decl to the declarations in the Env. -// Returns a non-empty errorMsg if the identifier is already declared in the scope. -func (e *Env) addIdent(decl *exprpb.Decl) errorMsg { - current := e.declarations.FindIdentInScope(decl.Name) - if current != nil { - return overlappingIdentifierError(decl.Name) - } - e.declarations.AddIdent(decl) - return "" -} - -// sanitizeFunction replaces well-known types referenced by message name with their equivalent -// CEL built-in type instances. 
-func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl { - fn := decl.GetFunction() - // Determine whether the declaration requires replacements from proto-based message type - // references to well-known CEL type references. - var needsSanitizing bool - for _, o := range fn.GetOverloads() { - if isObjectWellKnownType(o.GetResultType()) { - needsSanitizing = true - break - } - for _, p := range o.GetParams() { - if isObjectWellKnownType(p) { - needsSanitizing = true - break - } - } - } - - // Early return if the declaration requires no modification. - if !needsSanitizing { - return decl - } - - // Sanitize all of the overloads if any overload requires an update to its type references. - overloads := make([]*exprpb.Decl_FunctionDecl_Overload, 0, len(fn.GetOverloads())) - for i, o := range fn.GetOverloads() { - var sanitized bool - rt := o.GetResultType() - if isObjectWellKnownType(rt) { - rt = getObjectWellKnownType(rt) - sanitized = true - } - params := make([]*exprpb.Type, 0, len(o.GetParams())) - copy(params, o.GetParams()) - for j, p := range params { - if isObjectWellKnownType(p) { - params[j] = getObjectWellKnownType(p) - sanitized = true - } - } - // If sanitized, replace the overload definition. - if sanitized { - if o.IsInstanceFunction { - overloads[i] = - decls.NewInstanceOverload(o.GetOverloadId(), params, rt) - } else { - overloads[i] = - decls.NewOverload(o.GetOverloadId(), params, rt) - } - } else { - // Otherwise, preserve the original overload. - overloads[i] = o - } - } - return decls.NewFunction(decl.GetName(), overloads...) -} - -// sanitizeIdent replaces the identifier's well-known types referenced by message name with -// references to CEL built-in type instances. -func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl { - id := decl.GetIdent() - t := id.GetType() - if !isObjectWellKnownType(t) { - return decl - } - return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue()) -} - -// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name -// that corresponds the message name of a built-in CEL type. -func isObjectWellKnownType(t *exprpb.Type) bool { - if kindOf(t) != kindObject { - return false - } - _, found := pb.CheckedWellKnowns[t.GetMessageType()] - return found -} - -// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name. -func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type { - return pb.CheckedWellKnowns[t.GetMessageType()] -} - -// enterScope creates a new Env instance with a new innermost declaration scope. -func (e *Env) enterScope() *Env { - childDecls := e.declarations.Push() - return &Env{ - declarations: childDecls, - container: e.container, - provider: e.provider, - aggLitElemType: e.aggLitElemType, - } -} - -// exitScope creates a new Env instance with the nearest outer declaration scope. -func (e *Env) exitScope() *Env { - parentDecls := e.declarations.Pop() - return &Env{ - declarations: parentDecls, - container: e.container, - provider: e.provider, - aggLitElemType: e.aggLitElemType, - } -} - -// errorMsg is a type alias meant to represent error-based return values which -// may be accumulated into an error at a later point in execution. 
-type errorMsg string - -func overlappingIdentifierError(name string) errorMsg { - return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) -} - -func overlappingOverloadError(name string, - overloadID1 string, f1 *exprpb.Type, - overloadID2 string, f2 *exprpb.Type) errorMsg { - return errorMsg(fmt.Sprintf( - "overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+ - "cannot be distinguished from '%s' with overloadId: '%s')", - name, - FormatCheckedType(f1), - overloadID1, - FormatCheckedType(f2), - overloadID2)) -} - -func overlappingMacroError(name string, argCount int) errorMsg { - return errorMsg(fmt.Sprintf( - "overlapping macro for name '%s' with %d args", name, argCount)) -} - -func formatError(errMsgs []errorMsg) error { - errStrs := make([]string, 0) - if len(errMsgs) > 0 { - for i := 0; i < len(errMsgs); i++ { - if errMsgs[i] != "" { - errStrs = append(errStrs, string(errMsgs[i])) - } - } - } - if len(errStrs) > 0 { - return fmt.Errorf("%s", strings.Join(errStrs, "\n")) - } - return nil -} diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go deleted file mode 100644 index 33af57cd21..0000000000 --- a/vendor/github.com/google/cel-go/checker/errors.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "github.com/google/cel-go/common" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// typeErrors is a specialization of Errors. 
-type typeErrors struct { - *common.Errors -} - -func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) { - e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container) -} - -func (e *typeErrors) expressionDoesNotSelectField(l common.Location) { - e.ReportError(l, "expression does not select a field") -} - -func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) { - e.ReportError(l, "type '%s' does not support field selection", t) -} - -func (e *typeErrors) undefinedField(l common.Location, field string) { - e.ReportError(l, "undefined field '%s'", field) -} - -func (e *typeErrors) fieldDoesNotSupportPresenceCheck(l common.Location, field string) { - e.ReportError(l, "field '%s' does not support presence check", field) -} - -func (e *typeErrors) overlappingOverload(l common.Location, name string, overloadID1 string, f1 *exprpb.Type, - overloadID2 string, f2 *exprpb.Type) { - e.ReportError(l, "overlapping overload for name '%s' (type '%s' with overloadId: '%s' cannot be distinguished from '%s' with "+ - "overloadId: '%s')", name, FormatCheckedType(f1), overloadID1, FormatCheckedType(f2), overloadID2) -} - -func (e *typeErrors) overlappingMacro(l common.Location, name string, args int) { - e.ReportError(l, "overload for name '%s' with %d argument(s) overlaps with predefined macro", - name, args) -} - -func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) { - signature := formatFunction(nil, args, isInstance) - e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature) -} - -func (e *typeErrors) aggregateTypeMismatch(l common.Location, aggregate *exprpb.Type, member *exprpb.Type) { - e.ReportError( - l, - "type '%s' does not match previous type '%s' in aggregate. Use 'dyn(x)' to make the aggregate dynamic.", - FormatCheckedType(member), - FormatCheckedType(aggregate)) -} - -func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) { - e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t) -} - -func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) { - e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t)) -} - -func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) { - e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'", - name, FormatCheckedType(field), FormatCheckedType(value)) -} - -func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) { - e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName) -} - -func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) { - e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", - FormatCheckedType(t)) -} - -func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) { - e.ReportError(l, "expected type '%s' but found '%s'", - FormatCheckedType(expected), FormatCheckedType(actual)) -} - -func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { - result := "" - if isInstance { - target := argTypes[0] - argTypes = argTypes[1:] - - result += FormatCheckedType(target) - result += "." 
- } - - result += "(" - for i, arg := range argTypes { - if i > 0 { - result += ", " - } - result += FormatCheckedType(arg) - } - result += ")" - if resultType != nil { - result += " -> " - result += FormatCheckedType(resultType) - } - - return result -} diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go deleted file mode 100644 index bd5e412db6..0000000000 --- a/vendor/github.com/google/cel-go/checker/mapping.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "fmt" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -type mapping struct { - mapping map[string]*exprpb.Type -} - -func newMapping() *mapping { - return &mapping{ - mapping: make(map[string]*exprpb.Type), - } -} - -func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) { - m.mapping[typeKey(from)] = to -} - -func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) { - if r, found := m.mapping[typeKey(from)]; found { - return r, found - } - return nil, false -} - -func (m *mapping) copy() *mapping { - c := newMapping() - - for k, v := range m.mapping { - c.mapping[k] = v - } - return c -} - -func (m *mapping) String() string { - result := "{" - - for k, v := range m.mapping { - result += fmt.Sprintf("%v => %v ", k, v) - } - - result += "}" - return result -} diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go deleted file mode 100644 index 15c25ecc68..0000000000 --- a/vendor/github.com/google/cel-go/checker/printer.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package checker - -import ( - "github.com/google/cel-go/common/debug" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -type semanticAdorner struct { - checks *exprpb.CheckedExpr -} - -var _ debug.Adorner = &semanticAdorner{} - -func (a *semanticAdorner) GetMetadata(elem interface{}) string { - result := "" - e, isExpr := elem.(*exprpb.Expr) - if !isExpr { - return result - } - t := a.checks.TypeMap[e.Id] - if t != nil { - result += "~" - result += FormatCheckedType(t) - } - - switch e.ExprKind.(type) { - case *exprpb.Expr_IdentExpr, - *exprpb.Expr_CallExpr, - *exprpb.Expr_StructExpr, - *exprpb.Expr_SelectExpr: - if ref, found := a.checks.ReferenceMap[e.Id]; found { - if len(ref.GetOverloadId()) == 0 { - result += "^" + ref.Name - } else { - for i, overload := range ref.OverloadId { - if i == 0 { - result += "^" - } else { - result += "|" - } - result += overload - } - } - } - } - - return result -} - -// Print returns a string representation of the Expr message, -// annotated with types from the CheckedExpr. The Expr must -// be a sub-expression embedded in the CheckedExpr. -func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string { - a := &semanticAdorner{checks: checks} - return debug.ToAdornedDebugString(e, a) -} diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go deleted file mode 100644 index 769de7511f..0000000000 --- a/vendor/github.com/google/cel-go/checker/standard.go +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/overloads" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// StandardDeclarations returns the Decls for all functions and constants in the evaluator. -func StandardDeclarations() []*exprpb.Decl { - // Some shortcuts we use when building declarations. - paramA := decls.NewTypeParamType("A") - typeParamAList := []string{"A"} - listOfA := decls.NewListType(paramA) - paramB := decls.NewTypeParamType("B") - typeParamABList := []string{"A", "B"} - mapOfAB := decls.NewMapType(paramA, paramB) - - var idents []*exprpb.Decl - for _, t := range []*exprpb.Type{ - decls.Int, decls.Uint, decls.Bool, - decls.Double, decls.Bytes, decls.String} { - idents = append(idents, - decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t))) - } - idents = append(idents, - decls.NewVar("list", decls.NewTypeType(listOfA)), - decls.NewVar("map", decls.NewTypeType(mapOfAB)), - decls.NewVar("null_type", decls.NewTypeType(decls.Null)), - decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil)))) - - // Booleans - // TODO: allow the conditional to return a heterogenous type. 
- return append(idents, []*exprpb.Decl{ - decls.NewFunction(operators.Conditional, - decls.NewParameterizedOverload(overloads.Conditional, - []*exprpb.Type{decls.Bool, paramA, paramA}, paramA, - typeParamAList)), - - decls.NewFunction(operators.LogicalAnd, - decls.NewOverload(overloads.LogicalAnd, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.LogicalOr, - decls.NewOverload(overloads.LogicalOr, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.LogicalNot, - decls.NewOverload(overloads.LogicalNot, - []*exprpb.Type{decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.NotStrictlyFalse, - decls.NewOverload(overloads.NotStrictlyFalse, - []*exprpb.Type{decls.Bool}, decls.Bool)), - - // Relations. - - decls.NewFunction(operators.Less, - decls.NewOverload(overloads.LessBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.LessInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.LessBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.LessTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.LessDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.LessEquals, - decls.NewOverload(overloads.LessEqualsBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.LessEqualsInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessEqualsUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessEqualsDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessEqualsString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.LessEqualsBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.LessEqualsTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.LessEqualsDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.Greater, - decls.NewOverload(overloads.GreaterBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.GreaterInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.GreaterBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.GreaterTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.GreaterDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.GreaterEquals, - decls.NewOverload(overloads.GreaterEqualsBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - 
decls.NewOverload(overloads.GreaterEqualsInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.Equals, - decls.NewParameterizedOverload(overloads.Equals, - []*exprpb.Type{paramA, paramA}, decls.Bool, - typeParamAList)), - - decls.NewFunction(operators.NotEquals, - decls.NewParameterizedOverload(overloads.NotEquals, - []*exprpb.Type{paramA, paramA}, decls.Bool, - typeParamAList)), - - // Algebra. - - decls.NewFunction(operators.Subtract, - decls.NewOverload(overloads.SubtractInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.SubtractUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.SubtractDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double), - decls.NewOverload(overloads.SubtractTimestampTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration), - decls.NewOverload(overloads.SubtractTimestampDuration, - []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp), - decls.NewOverload(overloads.SubtractDurationDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)), - - decls.NewFunction(operators.Multiply, - decls.NewOverload(overloads.MultiplyInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.MultiplyUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.MultiplyDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double)), - - decls.NewFunction(operators.Divide, - decls.NewOverload(overloads.DivideInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.DivideUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.DivideDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double)), - - decls.NewFunction(operators.Modulo, - decls.NewOverload(overloads.ModuloInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.ModuloUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)), - - decls.NewFunction(operators.Add, - decls.NewOverload(overloads.AddInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.AddUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.AddDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double), - decls.NewOverload(overloads.AddString, - []*exprpb.Type{decls.String, decls.String}, decls.String), - decls.NewOverload(overloads.AddBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes), - decls.NewParameterizedOverload(overloads.AddList, - []*exprpb.Type{listOfA, listOfA}, listOfA, - typeParamAList), - decls.NewOverload(overloads.AddTimestampDuration, - []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp), - 
decls.NewOverload(overloads.AddDurationTimestamp, - []*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp), - decls.NewOverload(overloads.AddDurationDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)), - - decls.NewFunction(operators.Negate, - decls.NewOverload(overloads.NegateInt64, - []*exprpb.Type{decls.Int}, decls.Int), - decls.NewOverload(overloads.NegateDouble, - []*exprpb.Type{decls.Double}, decls.Double)), - - // Index. - - decls.NewFunction(operators.Index, - decls.NewParameterizedOverload(overloads.IndexList, - []*exprpb.Type{listOfA, decls.Int}, paramA, - typeParamAList), - decls.NewParameterizedOverload(overloads.IndexMap, - []*exprpb.Type{mapOfAB, paramA}, paramB, - typeParamABList)), - //decls.NewOverload(overloads.IndexMessage, - // []*expr.Type{decls.Dyn, decls.String}, decls.Dyn)), - - // Collections. - - decls.NewFunction(overloads.Size, - decls.NewInstanceOverload(overloads.SizeStringInst, - []*exprpb.Type{decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.SizeBytesInst, - []*exprpb.Type{decls.Bytes}, decls.Int), - decls.NewParameterizedInstanceOverload(overloads.SizeListInst, - []*exprpb.Type{listOfA}, decls.Int, typeParamAList), - decls.NewParameterizedInstanceOverload(overloads.SizeMapInst, - []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList), - decls.NewOverload(overloads.SizeString, - []*exprpb.Type{decls.String}, decls.Int), - decls.NewOverload(overloads.SizeBytes, - []*exprpb.Type{decls.Bytes}, decls.Int), - decls.NewParameterizedOverload(overloads.SizeList, - []*exprpb.Type{listOfA}, decls.Int, typeParamAList), - decls.NewParameterizedOverload(overloads.SizeMap, - []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)), - - decls.NewFunction(operators.In, - decls.NewParameterizedOverload(overloads.InList, - []*exprpb.Type{paramA, listOfA}, decls.Bool, - typeParamAList), - decls.NewParameterizedOverload(overloads.InMap, - []*exprpb.Type{paramA, mapOfAB}, decls.Bool, - typeParamABList)), - - // Deprecated 'in()' function. - - decls.NewFunction(overloads.DeprecatedIn, - decls.NewParameterizedOverload(overloads.InList, - []*exprpb.Type{paramA, listOfA}, decls.Bool, - typeParamAList), - decls.NewParameterizedOverload(overloads.InMap, - []*exprpb.Type{paramA, mapOfAB}, decls.Bool, - typeParamABList)), - //decls.NewOverload(overloads.InMessage, - // []*expr.Type{Dyn, decls.String},decls.Bool)), - - // Conversions to type. - - decls.NewFunction(overloads.TypeConvertType, - decls.NewParameterizedOverload(overloads.TypeConvertType, - []*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)), - - // Conversions to int. - - decls.NewFunction(overloads.TypeConvertInt, - decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int), - decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int), - decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int), - decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int), - decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)), - - // Conversions to uint. 
- - decls.NewFunction(overloads.TypeConvertUint, - decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint), - decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint), - decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint), - decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)), - - // Conversions to double. - - decls.NewFunction(overloads.TypeConvertDouble, - decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double), - decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double), - decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double), - decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)), - - // Conversions to bool. - - decls.NewFunction(overloads.TypeConvertBool, - decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool), - decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)), - - // Conversions to string. - - decls.NewFunction(overloads.TypeConvertString, - decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String), - decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String), - decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String), - decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String), - decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String), - decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String), - decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String), - decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)), - - // Conversions to bytes. - - decls.NewFunction(overloads.TypeConvertBytes, - decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes), - decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)), - - // Conversions to timestamps. - - decls.NewFunction(overloads.TypeConvertTimestamp, - decls.NewOverload(overloads.TimestampToTimestamp, - []*exprpb.Type{decls.Timestamp}, decls.Timestamp), - decls.NewOverload(overloads.StringToTimestamp, - []*exprpb.Type{decls.String}, decls.Timestamp), - decls.NewOverload(overloads.IntToTimestamp, - []*exprpb.Type{decls.Int}, decls.Timestamp)), - - // Conversions to durations. - - decls.NewFunction(overloads.TypeConvertDuration, - decls.NewOverload(overloads.DurationToDuration, - []*exprpb.Type{decls.Duration}, decls.Duration), - decls.NewOverload(overloads.StringToDuration, - []*exprpb.Type{decls.String}, decls.Duration), - decls.NewOverload(overloads.IntToDuration, - []*exprpb.Type{decls.Int}, decls.Duration)), - - // Conversions to Dyn. - - decls.NewFunction(overloads.TypeConvertDyn, - decls.NewParameterizedOverload(overloads.ToDyn, - []*exprpb.Type{paramA}, decls.Dyn, - typeParamAList)), - - // String functions. 
- - decls.NewFunction(overloads.Contains, - decls.NewInstanceOverload(overloads.ContainsString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - decls.NewFunction(overloads.EndsWith, - decls.NewInstanceOverload(overloads.EndsWithString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - decls.NewFunction(overloads.Matches, - decls.NewInstanceOverload(overloads.MatchesString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - decls.NewFunction(overloads.StartsWith, - decls.NewInstanceOverload(overloads.StartsWithString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - - // Date/time functions. - - decls.NewFunction(overloads.TimeGetFullYear, - decls.NewInstanceOverload(overloads.TimestampToYear, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToYearWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetMonth, - decls.NewInstanceOverload(overloads.TimestampToMonth, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToMonthWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDayOfYear, - decls.NewInstanceOverload(overloads.TimestampToDayOfYear, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDayOfMonth, - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDate, - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDayOfWeek, - decls.NewInstanceOverload(overloads.TimestampToDayOfWeek, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetHours, - decls.NewInstanceOverload(overloads.TimestampToHours, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToHoursWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToHours, - []*exprpb.Type{decls.Duration}, decls.Int)), - - decls.NewFunction(overloads.TimeGetMinutes, - decls.NewInstanceOverload(overloads.TimestampToMinutes, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToMinutes, - []*exprpb.Type{decls.Duration}, decls.Int)), - - decls.NewFunction(overloads.TimeGetSeconds, - decls.NewInstanceOverload(overloads.TimestampToSeconds, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToSeconds, - []*exprpb.Type{decls.Duration}, decls.Int)), - - 
decls.NewFunction(overloads.TimeGetMilliseconds, - decls.NewInstanceOverload(overloads.TimestampToMilliseconds, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToMilliseconds, - []*exprpb.Type{decls.Duration}, decls.Int))}...) -} diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go deleted file mode 100644 index 05d30aa1b9..0000000000 --- a/vendor/github.com/google/cel-go/checker/types.go +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "fmt" - "strings" - - "github.com/google/cel-go/checker/decls" - - "google.golang.org/protobuf/proto" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -const ( - kindUnknown = iota + 1 - kindError - kindFunction - kindDyn - kindPrimitive - kindWellKnown - kindWrapper - kindNull - kindAbstract // TODO: Update the checker protos to include abstract - kindType - kindList - kindMap - kindObject - kindTypeParam -) - -// FormatCheckedType converts a type message into a string representation. -func FormatCheckedType(t *exprpb.Type) string { - switch kindOf(t) { - case kindDyn: - return "dyn" - case kindFunction: - return formatFunction(t.GetFunction().GetResultType(), - t.GetFunction().GetArgTypes(), - false) - case kindList: - return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().ElemType)) - case kindObject: - return t.GetMessageType() - case kindMap: - return fmt.Sprintf("map(%s, %s)", - FormatCheckedType(t.GetMapType().KeyType), - FormatCheckedType(t.GetMapType().ValueType)) - case kindNull: - return "null" - case kindPrimitive: - switch t.GetPrimitive() { - case exprpb.Type_UINT64: - return "uint" - case exprpb.Type_INT64: - return "int" - } - return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") - case kindType: - if t.GetType() == nil { - return "type" - } - return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) - case kindWellKnown: - switch t.GetWellKnown() { - case exprpb.Type_ANY: - return "any" - case exprpb.Type_DURATION: - return "duration" - case exprpb.Type_TIMESTAMP: - return "timestamp" - } - case kindWrapper: - return fmt.Sprintf("wrapper(%s)", - FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper()))) - case kindError: - return "!error!" - } - return t.String() -} - -// isDyn returns true if the input t is either type DYN or a well-known ANY message. -func isDyn(t *exprpb.Type) bool { - // Note: object type values that are well-known and map to a DYN value in practice - // are sanitized prior to being added to the environment. 
- switch kindOf(t) { - case kindDyn: - return true - case kindWellKnown: - return t.GetWellKnown() == exprpb.Type_ANY - default: - return false - } -} - -// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message. -func isDynOrError(t *exprpb.Type) bool { - switch kindOf(t) { - case kindError: - return true - default: - return isDyn(t) - } -} - -// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one. -// A type is less specific if it matches the other type using the DYN type. -func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool { - kind1, kind2 := kindOf(t1), kindOf(t2) - // The first type is less specific. - if isDyn(t1) || kind1 == kindTypeParam { - return true - } - // The first type is not less specific. - if isDyn(t2) || kind2 == kindTypeParam { - return false - } - // Types must be of the same kind to be equal. - if kind1 != kind2 { - return false - } - - // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in - // order to return true. - switch kind1 { - case kindAbstract: - a1 := t1.GetAbstractType() - a2 := t2.GetAbstractType() - if a1.GetName() != a2.GetName() || - len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) { - return false - } - for i, p1 := range a1.GetParameterTypes() { - if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) { - return false - } - } - return true - case kindFunction: - fn1 := t1.GetFunction() - fn2 := t2.GetFunction() - if len(fn1.ArgTypes) != len(fn2.ArgTypes) { - return false - } - if !isEqualOrLessSpecific(fn1.ResultType, fn2.ResultType) { - return false - } - for i, a1 := range fn1.ArgTypes { - if !isEqualOrLessSpecific(a1, fn2.ArgTypes[i]) { - return false - } - } - return true - case kindList: - return isEqualOrLessSpecific(t1.GetListType().ElemType, t2.GetListType().ElemType) - case kindMap: - m1 := t1.GetMapType() - m2 := t2.GetMapType() - return isEqualOrLessSpecific(m1.KeyType, m2.KeyType) && - isEqualOrLessSpecific(m1.ValueType, m2.ValueType) - case kindType: - return true - default: - return proto.Equal(t1, t2) - } -} - -/// internalIsAssignable returns true if t1 is assignable to t2. -func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { - // A type is always assignable to itself. - // Early terminate the call to avoid cases of infinite recursion. - if proto.Equal(t1, t2) { - return true - } - // Process type parameters. - kind1, kind2 := kindOf(t1), kindOf(t2) - if kind2 == kindTypeParam { - if t2Sub, found := m.find(t2); found { - // If the types are compatible, pick the more general type and return true - if internalIsAssignable(m, t1, t2Sub) { - m.add(t2, mostGeneral(t1, t2Sub)) - return true - } - return false - } - if notReferencedIn(m, t2, t1) { - m.add(t2, t1) - return true - } - } - if kind1 == kindTypeParam { - // For the lower type bound, we currently do not perform adjustment. The restricted - // way we use type parameters in lower type bounds, it is not necessary, but may - // become if we generalize type unification. - if t1Sub, found := m.find(t1); found { - // If the types are compatible, pick the more general type and return true - if internalIsAssignable(m, t1Sub, t2) { - m.add(t1, mostGeneral(t1Sub, t2)) - return true - } - return false - } - if notReferencedIn(m, t1, t2) { - m.add(t1, t2) - return true - } - } - - // Next check for wildcard types. 
- if isDynOrError(t1) || isDynOrError(t2) { - return true - } - - // Test for when the types do not need to agree, but are more specific than dyn. - switch kind1 { - case kindNull: - return internalIsAssignableNull(t2) - case kindPrimitive: - return internalIsAssignablePrimitive(t1.GetPrimitive(), t2) - case kindWrapper: - return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2) - default: - if kind1 != kind2 { - return false - } - } - - // Test for when the types must agree. - switch kind1 { - // ERROR, TYPE_PARAM, and DYN handled above. - case kindAbstract: - return internalIsAssignableAbstractType(m, - t1.GetAbstractType(), t2.GetAbstractType()) - case kindFunction: - return internalIsAssignableFunction(m, - t1.GetFunction(), t2.GetFunction()) - case kindList: - return internalIsAssignable(m, - t1.GetListType().GetElemType(), - t2.GetListType().GetElemType()) - case kindMap: - return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType()) - case kindObject: - return t1.GetMessageType() == t2.GetMessageType() - case kindType: - // A type is a type is a type, any additional parameterization of the - // type cannot affect method resolution or assignability. - return true - case kindWellKnown: - return t1.GetWellKnown() == t2.GetWellKnown() - default: - return false - } -} - -// internalIsAssignableAbstractType returns true if the abstract type names agree and all type -// parameters are assignable. -func internalIsAssignableAbstractType(m *mapping, - a1 *exprpb.Type_AbstractType, - a2 *exprpb.Type_AbstractType) bool { - if a1.GetName() != a2.GetName() { - return false - } - if internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes()) { - return true - } - return false -} - -// internalIsAssignableFunction returns true if the function return type and arg types are -// assignable. -func internalIsAssignableFunction(m *mapping, - f1 *exprpb.Type_FunctionType, - f2 *exprpb.Type_FunctionType) bool { - f1ArgTypes := flattenFunctionTypes(f1) - f2ArgTypes := flattenFunctionTypes(f2) - if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) { - return true - } - return false -} - -// internalIsAssignableList returns true if the element types at each index in the list are -// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be -// assignable. -func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool { - if len(l1) != len(l2) { - return false - } - for i, t1 := range l1 { - if !internalIsAssignable(m, t1, l2[i]) { - return false - } - } - return true -} - -// internalIsAssignableMap returns true if map m1 may be assigned to map m2. -func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool { - if internalIsAssignableList(m, - []*exprpb.Type{m1.GetKeyType(), m1.GetValueType()}, - []*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) { - return true - } - return false -} - -// internalIsAssignableNull returns true if the type is nullable. -func internalIsAssignableNull(t *exprpb.Type) bool { - switch kindOf(t) { - case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper: - return true - default: - return false - } -} - -// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper -// for the primitive type. 
-func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool { - switch kindOf(target) { - case kindPrimitive: - return p == target.GetPrimitive() - case kindWrapper: - return p == target.GetWrapper() - default: - return false - } -} - -// isAssignable returns an updated type substitution mapping if t1 is assignable to t2. -func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping { - mCopy := m.copy() - if internalIsAssignable(mCopy, t1, t2) { - return mCopy - } - return nil -} - -// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2. -func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping { - mCopy := m.copy() - if internalIsAssignableList(mCopy, l1, l2) { - return mCopy - } - return nil -} - -// kindOf returns the kind of the type as defined in the checked.proto. -func kindOf(t *exprpb.Type) int { - if t == nil || t.TypeKind == nil { - return kindUnknown - } - switch t.TypeKind.(type) { - case *exprpb.Type_Error: - return kindError - case *exprpb.Type_Function: - return kindFunction - case *exprpb.Type_Dyn: - return kindDyn - case *exprpb.Type_Primitive: - return kindPrimitive - case *exprpb.Type_WellKnown: - return kindWellKnown - case *exprpb.Type_Wrapper: - return kindWrapper - case *exprpb.Type_Null: - return kindNull - case *exprpb.Type_Type: - return kindType - case *exprpb.Type_ListType_: - return kindList - case *exprpb.Type_MapType_: - return kindMap - case *exprpb.Type_MessageType: - return kindObject - case *exprpb.Type_TypeParam: - return kindTypeParam - } - return kindUnknown -} - -// mostGeneral returns the more general of two types which are known to unify. -func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type { - if isEqualOrLessSpecific(t1, t2) { - return t1 - } - return t2 -} - -// notReferencedIn checks whether the type doesn't appear directly or transitively within the other -// type. This is a standard requirement for type unification, commonly referred to as the "occurs -// check". -func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool { - if proto.Equal(t, withinType) { - return false - } - withinKind := kindOf(withinType) - switch withinKind { - case kindTypeParam: - wtSub, found := m.find(withinType) - if !found { - return true - } - return notReferencedIn(m, t, wtSub) - case kindAbstract: - for _, pt := range withinType.GetAbstractType().GetParameterTypes() { - if !notReferencedIn(m, t, pt) { - return false - } - } - return true - case kindFunction: - fn := withinType.GetFunction() - types := flattenFunctionTypes(fn) - for _, a := range types { - if !notReferencedIn(m, t, a) { - return false - } - } - return true - case kindList: - return notReferencedIn(m, t, withinType.GetListType().ElemType) - case kindMap: - mt := withinType.GetMapType() - return notReferencedIn(m, t, mt.KeyType) && notReferencedIn(m, t, mt.ValueType) - case kindWrapper: - return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper())) - default: - return true - } -} - -// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type -// parameters are replaced by DYN if typeParamToDyn is true. 
-func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type { - if tSub, found := m.find(t); found { - return substitute(m, tSub, typeParamToDyn) - } - kind := kindOf(t) - if typeParamToDyn && kind == kindTypeParam { - return decls.Dyn - } - switch kind { - case kindAbstract: - // TODO: implement! - at := t.GetAbstractType() - params := make([]*exprpb.Type, len(at.GetParameterTypes())) - for i, p := range at.GetParameterTypes() { - params[i] = substitute(m, p, typeParamToDyn) - } - return decls.NewAbstractType(at.GetName(), params...) - case kindFunction: - fn := t.GetFunction() - rt := substitute(m, fn.ResultType, typeParamToDyn) - args := make([]*exprpb.Type, len(fn.ArgTypes)) - for i, a := range fn.ArgTypes { - args[i] = substitute(m, a, typeParamToDyn) - } - return decls.NewFunctionType(rt, args...) - case kindList: - return decls.NewListType(substitute(m, t.GetListType().ElemType, typeParamToDyn)) - case kindMap: - mt := t.GetMapType() - return decls.NewMapType(substitute(m, mt.KeyType, typeParamToDyn), - substitute(m, mt.ValueType, typeParamToDyn)) - case kindType: - if t.GetType() != nil { - return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn)) - } - return t - default: - return t - } -} - -func typeKey(t *exprpb.Type) string { - return FormatCheckedType(t) -} - -// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR -// and returns a slice containing {T1, T2, ..., TN, TR}. -func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type { - argTypes := f.GetArgTypes() - if len(argTypes) == 0 { - return []*exprpb.Type{f.GetResultType()} - } - flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1) - for i, at := range argTypes { - flattend[i] = at - } - flattend[len(argTypes)] = f.GetResultType() - return flattend -} diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel deleted file mode 100644 index 97c18bcae2..0000000000 --- a/vendor/github.com/google/cel-go/common/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "error.go", - "errors.go", - "location.go", - "source.go", - ], - importpath = "github.com/google/cel-go/common", - deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - "@org_golang_x_text//width:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "errors_test.go", - "source_test.go", - ], - size = "small", - embed = [ - ":go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel deleted file mode 100644 index 2f3715ff7d..0000000000 --- a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - importpath = "github.com/google/cel-go/common/containers", - srcs = [ - "container.go", - ], - deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "container_test.go", - ], - size = "small", - embed 
= [ - ":go_default_library", - ], - deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go deleted file mode 100644 index 07ae72c81b..0000000000 --- a/vendor/github.com/google/cel-go/common/containers/container.go +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package containers defines types and functions for resolving qualified names within a namespace -// or type provided to CEL. -package containers - -import ( - "fmt" - "strings" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -var ( - // DefaultContainer has an empty container name. - DefaultContainer *Container = nil - - // Empty map to search for aliases when needed. - noAliases = make(map[string]string) -) - -// NewContainer creates a new Container with the fully-qualified name. -func NewContainer(opts ...ContainerOption) (*Container, error) { - var c *Container - var err error - for _, opt := range opts { - c, err = opt(c) - if err != nil { - return nil, err - } - } - return c, nil -} - -// Container holds a reference to an optional qualified container name and set of aliases. -// -// The program container can be used to simplify variable, function, and type specification within -// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more -// details. -type Container struct { - name string - aliases map[string]string -} - -// Extend creates a new Container with the existing settings and applies a series of -// ContainerOptions to further configure the new container. -func (c *Container) Extend(opts ...ContainerOption) (*Container, error) { - if c == nil { - return NewContainer(opts...) - } - // Copy the name and aliases of the existing container. - ext := &Container{name: c.Name()} - if len(c.aliasSet()) > 0 { - aliasSet := make(map[string]string, len(c.aliasSet())) - for k, v := range c.aliasSet() { - aliasSet[k] = v - } - ext.aliases = aliasSet - } - // Apply the new options to the container. - var err error - for _, opt := range opts { - ext, err = opt(ext) - if err != nil { - return nil, err - } - } - return ext, nil -} - -// Name returns the fully-qualified name of the container. -// -// The name may conceptually be a namespace, package, or type. -func (c *Container) Name() string { - if c == nil { - return "" - } - return c.name -} - -// ResolveCandidateNames returns the candidates name of namespaced identifiers in C++ resolution -// order. -// -// Names which shadow other names are returned first. If a name includes a leading dot ('.'), -// the name is treated as an absolute identifier which cannot be shadowed. 
-// -// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order: -// -// a.b.c.M.N.R.s -// a.b.c.M.R.s -// a.b.c.R.s -// a.b.R.s -// a.R.s -// R.s -// -// If aliases or abbreviations are configured for the container, then alias names will take -// precedence over containerized names. -func (c *Container) ResolveCandidateNames(name string) []string { - if strings.HasPrefix(name, ".") { - qn := name[1:] - alias, isAlias := c.findAlias(qn) - if isAlias { - return []string{alias} - } - return []string{qn} - } - alias, isAlias := c.findAlias(name) - if isAlias { - return []string{alias} - } - if c.Name() == "" { - return []string{name} - } - nextCont := c.Name() - candidates := []string{nextCont + "." + name} - for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") { - nextCont = nextCont[:i] - candidates = append(candidates, nextCont+"."+name) - } - return append(candidates, name) -} - -// aliasSet returns the alias to fully-qualified name mapping stored in the container. -func (c *Container) aliasSet() map[string]string { - if c == nil || c.aliases == nil { - return noAliases - } - return c.aliases -} - -// findAlias takes a name as input and returns an alias expansion if one exists. -// -// If the name is qualified, the first component of the qualified name is checked against known -// aliases. Any alias that is found in a qualified name is expanded in the result: -// -// alias: R -> my.alias.R -// name: R.S.T -// output: my.alias.R.S.T -// -// Note, the name must not have a leading dot. -func (c *Container) findAlias(name string) (string, bool) { - // If an alias exists for the name, ensure it is searched last. - simple := name - qualifier := "" - dot := strings.Index(name, ".") - if dot >= 0 { - simple = name[0:dot] - qualifier = name[dot:] - } - alias, found := c.aliasSet()[simple] - if !found { - return "", false - } - return alias + qualifier, true -} - -// ContainerOption specifies a functional configuration option for a Container. -// -// Note, ContainerOption implementations must be able to handle nil container inputs. -type ContainerOption func(*Container) (*Container, error) - -// Abbrevs configures a set of simple names as abbreviations for fully-qualified names. -// -// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name. -// Abbreviations can be useful when working with variables, functions, and especially types from -// multiple namespaces: -// -// // CEL object construction -// qual.pkg.version.ObjTypeName{ -// field: alt.container.ver.FieldTypeName{value: ...} -// } -// -// Only one the qualified names above may be used as the CEL container, so at least one of these -// references must be a long qualified name within an otherwise short CEL program. Using the -// following abbreviations, the program becomes much simpler: -// -// // CEL Go option -// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName") -// // Simplified Object construction -// ObjTypeName{field: FieldTypeName{value: ...}} -// -// There are a few rules for the qualified names and the simple abbreviations generated from them: -// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`. -// - The last element in the qualified name is the abbreviation. -// - Abbreviations must not collide with each other. -// - The abbreviation must not collide with unqualified names in use. 
-// -// Abbreviations are distinct from container-based references in the following important ways: -// - Abbreviations must expand to a fully-qualified name. -// - Expanded abbreviations do not participate in namespace resolution. -// - Abbreviation expansion is done instead of the container search for a matching identifier. -// - Containers follow C++ namespace resolution rules with searches from the most qualified name -// to the least qualified name. -// - Container references within the CEL program may be relative, and are resolved to fully -// qualified names at either type-check time or program plan time, whichever comes first. -// -// If there is ever a case where an identifier could be in both the container and as an -// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is -// preserved between compilations even as the container evolves. -func Abbrevs(qualifiedNames ...string) ContainerOption { - return func(c *Container) (*Container, error) { - for _, qn := range qualifiedNames { - ind := strings.LastIndex(qn, ".") - if ind <= 0 || ind >= len(qn)-1 { - return nil, fmt.Errorf( - "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) - } - alias := qn[ind+1:] - var err error - c, err = aliasAs("abbreviation", qn, alias)(c) - if err != nil { - return nil, err - } - } - return c, nil - } -} - -// Alias associates a fully-qualified name with a user-defined alias. -// -// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option -// are more easily traced back to source code. The Alias option is useful for propagating alias -// configuration from one Container instance to another, and may also be useful for remapping -// poorly chosen protobuf message / package names. -// -// Note: all of the rules that apply to Abbrevs also apply to Alias. -func Alias(qualifiedName, alias string) ContainerOption { - return aliasAs("alias", qualifiedName, alias) -} - -func aliasAs(kind, qualifiedName, alias string) ContainerOption { - return func(c *Container) (*Container, error) { - if len(alias) == 0 || strings.Contains(alias, ".") { - return nil, fmt.Errorf( - "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias) - } - - if qualifiedName[0:1] == "." { - return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s", - qualifiedName) - } - ind := strings.LastIndex(qualifiedName, ".") - if ind <= 0 || ind == len(qualifiedName)-1 { - return nil, fmt.Errorf("%s must refer to a valid qualified name: %s", - kind, qualifiedName) - } - aliasRef, found := c.aliasSet()[alias] - if found { - return nil, fmt.Errorf( - "%s collides with existing reference: name=%s, %s=%s, existing=%s", - kind, qualifiedName, kind, alias, aliasRef) - } - if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias { - return nil, fmt.Errorf( - "%s collides with container name: name=%s, %s=%s, container=%s", - kind, qualifiedName, kind, alias, c.Name()) - } - if c == nil { - c = &Container{} - } - if c.aliases == nil { - c.aliases = make(map[string]string) - } - c.aliases[alias] = qualifiedName - return c, nil - } -} - -// Name sets the fully-qualified name of the Container. -func Name(name string) ContainerOption { - return func(c *Container) (*Container, error) { - if len(name) > 0 && name[0:1] == "." 
{ - return nil, fmt.Errorf("container name must not contain a leading '.': %s", name) - } - if c.Name() == name { - return c, nil - } - if c == nil { - return &Container{name: name}, nil - } - c.name = name - return c, nil - } -} - -// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean -// 'found' value that indicates if the conversion is successful. -func ToQualifiedName(e *exprpb.Expr) (string, bool) { - switch e.ExprKind.(type) { - case *exprpb.Expr_IdentExpr: - id := e.GetIdentExpr() - return id.Name, true - case *exprpb.Expr_SelectExpr: - sel := e.GetSelectExpr() - // Test only expressions are not valid as qualified names. - if sel.GetTestOnly() { - return "", false - } - if qual, found := ToQualifiedName(sel.Operand); found { - return qual + "." + sel.Field, true - } - } - return "", false -} diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel deleted file mode 100644 index cf5c5d2467..0000000000 --- a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "debug.go", - ], - importpath = "github.com/google/cel-go/common/debug", - deps = [ - "//common:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go deleted file mode 100644 index 5a4894733d..0000000000 --- a/vendor/github.com/google/cel-go/common/debug/debug.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package debug provides tools to print a parsed expression graph and -// adorn each expression element with additional metadata. -package debug - -import ( - "bytes" - "fmt" - "strconv" - "strings" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// Adorner returns debug metadata that will be tacked on to the string -// representation of an expression. -type Adorner interface { - // GetMetadata for the input context. - GetMetadata(ctx interface{}) string -} - -// Writer manages writing expressions to an internal string. -type Writer interface { - fmt.Stringer - - // Buffer pushes an expression into an internal queue of expressions to - // write to a string. - Buffer(e *exprpb.Expr) -} - -type emptyDebugAdorner struct { -} - -var emptyAdorner Adorner = &emptyDebugAdorner{} - -func (a *emptyDebugAdorner) GetMetadata(e interface{}) string { - return "" -} - -// ToDebugString gives the unadorned string representation of the Expr. 
-func ToDebugString(e *exprpb.Expr) string { - return ToAdornedDebugString(e, emptyAdorner) -} - -// ToAdornedDebugString gives the adorned string representation of the Expr. -func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string { - w := newDebugWriter(adorner) - w.Buffer(e) - return w.String() -} - -// debugWriter is used to print out pretty-printed debug strings. -type debugWriter struct { - adorner Adorner - buffer bytes.Buffer - indent int - lineStart bool -} - -func newDebugWriter(a Adorner) *debugWriter { - return &debugWriter{ - adorner: a, - indent: 0, - lineStart: true, - } -} - -func (w *debugWriter) Buffer(e *exprpb.Expr) { - if e == nil { - return - } - switch e.ExprKind.(type) { - case *exprpb.Expr_ConstExpr: - w.append(formatLiteral(e.GetConstExpr())) - case *exprpb.Expr_IdentExpr: - w.append(e.GetIdentExpr().Name) - case *exprpb.Expr_SelectExpr: - w.appendSelect(e.GetSelectExpr()) - case *exprpb.Expr_CallExpr: - w.appendCall(e.GetCallExpr()) - case *exprpb.Expr_ListExpr: - w.appendList(e.GetListExpr()) - case *exprpb.Expr_StructExpr: - w.appendStruct(e.GetStructExpr()) - case *exprpb.Expr_ComprehensionExpr: - w.appendComprehension(e.GetComprehensionExpr()) - } - w.adorn(e) -} - -func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) { - w.Buffer(sel.Operand) - w.append(".") - w.append(sel.Field) - if sel.TestOnly { - w.append("~test-only~") - } -} - -func (w *debugWriter) appendCall(call *exprpb.Expr_Call) { - if call.Target != nil { - w.Buffer(call.Target) - w.append(".") - } - w.append(call.Function) - w.append("(") - if len(call.GetArgs()) > 0 { - w.addIndent() - w.appendLine() - for i, arg := range call.Args { - if i > 0 { - w.append(",") - w.appendLine() - } - w.Buffer(arg) - } - w.removeIndent() - w.appendLine() - } - w.append(")") -} - -func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) { - w.append("[") - if len(list.GetElements()) > 0 { - w.appendLine() - w.addIndent() - for i, elem := range list.Elements { - if i > 0 { - w.append(",") - w.appendLine() - } - w.Buffer(elem) - } - w.removeIndent() - w.appendLine() - } - w.append("]") -} - -func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) { - if obj.MessageName != "" { - w.appendObject(obj) - } else { - w.appendMap(obj) - } -} - -func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) { - w.append(obj.MessageName) - w.append("{") - if len(obj.Entries) > 0 { - w.appendLine() - w.addIndent() - for i, entry := range obj.Entries { - if i > 0 { - w.append(",") - w.appendLine() - } - w.append(entry.GetFieldKey()) - w.append(":") - w.Buffer(entry.Value) - w.adorn(entry) - } - w.removeIndent() - w.appendLine() - } - w.append("}") -} - -func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) { - w.append("{") - if len(obj.Entries) > 0 { - w.appendLine() - w.addIndent() - for i, entry := range obj.Entries { - if i > 0 { - w.append(",") - w.appendLine() - } - w.Buffer(entry.GetMapKey()) - w.append(":") - w.Buffer(entry.Value) - w.adorn(entry) - } - w.removeIndent() - w.appendLine() - } - w.append("}") -} - -func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) { - w.append("__comprehension__(") - w.addIndent() - w.appendLine() - w.append("// Variable") - w.appendLine() - w.append(comprehension.IterVar) - w.append(",") - w.appendLine() - w.append("// Target") - w.appendLine() - w.Buffer(comprehension.IterRange) - w.append(",") - w.appendLine() - w.append("// Accumulator") - w.appendLine() - w.append(comprehension.AccuVar) - w.append(",") 
- w.appendLine() - w.append("// Init") - w.appendLine() - w.Buffer(comprehension.AccuInit) - w.append(",") - w.appendLine() - w.append("// LoopCondition") - w.appendLine() - w.Buffer(comprehension.LoopCondition) - w.append(",") - w.appendLine() - w.append("// LoopStep") - w.appendLine() - w.Buffer(comprehension.LoopStep) - w.append(",") - w.appendLine() - w.append("// Result") - w.appendLine() - w.Buffer(comprehension.Result) - w.append(")") - w.removeIndent() -} - -func formatLiteral(c *exprpb.Constant) string { - switch c.ConstantKind.(type) { - case *exprpb.Constant_BoolValue: - return fmt.Sprintf("%t", c.GetBoolValue()) - case *exprpb.Constant_BytesValue: - return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue())) - case *exprpb.Constant_DoubleValue: - return fmt.Sprintf("%v", c.GetDoubleValue()) - case *exprpb.Constant_Int64Value: - return fmt.Sprintf("%d", c.GetInt64Value()) - case *exprpb.Constant_StringValue: - return strconv.Quote(c.GetStringValue()) - case *exprpb.Constant_Uint64Value: - return fmt.Sprintf("%du", c.GetUint64Value()) - case *exprpb.Constant_NullValue: - return "null" - default: - panic("Unknown constant type") - } -} - -func (w *debugWriter) append(s string) { - w.doIndent() - w.buffer.WriteString(s) -} - -func (w *debugWriter) appendFormat(f string, args ...interface{}) { - w.append(fmt.Sprintf(f, args...)) -} - -func (w *debugWriter) doIndent() { - if w.lineStart { - w.lineStart = false - w.buffer.WriteString(strings.Repeat(" ", w.indent)) - } -} - -func (w *debugWriter) adorn(e interface{}) { - w.append(w.adorner.GetMetadata(e)) -} - -func (w *debugWriter) appendLine() { - w.buffer.WriteString("\n") - w.lineStart = true -} - -func (w *debugWriter) addIndent() { - w.indent++ -} - -func (w *debugWriter) removeIndent() { - w.indent-- - if w.indent < 0 { - panic("negative indent") - } -} - -func (w *debugWriter) String() string { - return w.buffer.String() -} diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go deleted file mode 100644 index 5362fdfe4b..0000000000 --- a/vendor/github.com/google/cel-go/common/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package common defines types and utilities common to expression parsing, -// checking, and interpretation -package common diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go deleted file mode 100644 index bfe93d9737..0000000000 --- a/vendor/github.com/google/cel-go/common/error.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "fmt" - "strings" - "unicode/utf8" - - "golang.org/x/text/width" -) - -// Error type which references a location within source and a message. -type Error struct { - Location Location - Message string -} - -const ( - dot = "." - ind = "^" -) - -var ( - wideDot = width.Widen.String(dot) - wideInd = width.Widen.String(ind) -) - -// ToDisplayString decorates the error message with the source location. -func (e *Error) ToDisplayString(source Source) string { - var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", - source.Description(), - e.Location.Line(), - e.Location.Column()+1, // add one to the 0-based column for display - e.Message) - if snippet, found := source.Snippet(e.Location.Line()); found { - snippet := strings.Replace(snippet, "\t", " ", -1) - srcLine := "\n | " + snippet - var bytes = []byte(snippet) - var indLine = "\n | " - for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ { - _, sz := utf8.DecodeRune(bytes) - bytes = bytes[sz:] - if sz > 1 { - indLine += wideDot - } else { - indLine += dot - } - } - if _, sz := utf8.DecodeRune(bytes); sz > 1 { - indLine += wideInd - } else { - indLine += ind - } - result += srcLine + indLine - } - return result -} diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go deleted file mode 100644 index 6be0fdef32..0000000000 --- a/vendor/github.com/google/cel-go/common/errors.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "fmt" - "sort" -) - -// Errors type which contains a list of errors observed during parsing. -type Errors struct { - errors []Error - source Source -} - -// NewErrors creates a new instance of the Errors type. -func NewErrors(source Source) *Errors { - return &Errors{ - errors: []Error{}, - source: source} -} - -// ReportError records an error at a source location. -func (e *Errors) ReportError(l Location, format string, args ...interface{}) { - err := Error{ - Location: l, - Message: fmt.Sprintf(format, args...), - } - e.errors = append(e.errors, err) -} - -// GetErrors returns the list of observed errors. -func (e *Errors) GetErrors() []Error { - return e.errors[:] -} - -// Append takes an Errors object as input creates a new Errors object with the current and input -// errors. -func (e *Errors) Append(errs []Error) *Errors { - return &Errors{ - errors: append(e.errors, errs...), - source: e.source, - } -} - -// ToDisplayString returns the error set to a newline delimited string. 
-func (e *Errors) ToDisplayString() string { - var result = "" - sort.SliceStable(e.errors, func(i, j int) bool { - ei := e.errors[i].Location - ej := e.errors[j].Location - return ei.Line() < ej.Line() || - (ei.Line() == ej.Line() && ei.Column() < ej.Column()) - }) - for i, err := range e.errors { - if i >= 1 { - result += "\n" - } - result += err.ToDisplayString(e.source) - } - return result -} diff --git a/vendor/github.com/google/cel-go/common/location.go b/vendor/github.com/google/cel-go/common/location.go deleted file mode 100644 index 31fce6cd31..0000000000 --- a/vendor/github.com/google/cel-go/common/location.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -// Location interface to represent a location within Source. -type Location interface { - Line() int // 1-based line number within source. - Column() int // 0-based column number within source. -} - -// SourceLocation helper type to manually construct a location. -type SourceLocation struct { - line int - column int -} - -var ( - // Location implements the SourcceLocation interface. - _ Location = &SourceLocation{} - // NoLocation is a particular illegal location. - NoLocation = &SourceLocation{-1, -1} -) - -// NewLocation creates a new location. -func NewLocation(line, column int) Location { - return &SourceLocation{ - line: line, - column: column} -} - -// Line returns the 1-based line of the location. -func (l *SourceLocation) Line() int { - return l.line -} - -// Column returns the 0-based column number of the location. -func (l *SourceLocation) Column() int { - return l.column -} diff --git a/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel deleted file mode 100644 index b5b67f0623..0000000000 --- a/vendor/github.com/google/cel-go/common/operators/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "operators.go", - ], - importpath = "github.com/google/cel-go/common/operators", -) diff --git a/vendor/github.com/google/cel-go/common/operators/operators.go b/vendor/github.com/google/cel-go/common/operators/operators.go deleted file mode 100644 index 8f832c4d44..0000000000 --- a/vendor/github.com/google/cel-go/common/operators/operators.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package operators defines the internal function names of operators. -// -// ALl operators in the expression language are modelled as function calls. -package operators - -// String "names" for CEL operators. -const ( - // Symbolic operators. - Conditional = "_?_:_" - LogicalAnd = "_&&_" - LogicalOr = "_||_" - LogicalNot = "!_" - Equals = "_==_" - NotEquals = "_!=_" - Less = "_<_" - LessEquals = "_<=_" - Greater = "_>_" - GreaterEquals = "_>=_" - Add = "_+_" - Subtract = "_-_" - Multiply = "_*_" - Divide = "_/_" - Modulo = "_%_" - Negate = "-_" - Index = "_[_]" - - // Macros, must have a valid identifier. - Has = "has" - All = "all" - Exists = "exists" - ExistsOne = "exists_one" - Map = "map" - Filter = "filter" - - // Named operators, must not have be valid identifiers. - NotStrictlyFalse = "@not_strictly_false" - In = "@in" - - // Deprecated: named operators with valid identifiers. - OldNotStrictlyFalse = "__not_strictly_false__" - OldIn = "_in_" -) - -var ( - operators = map[string]string{ - "+": Add, - "/": Divide, - "==": Equals, - ">": Greater, - ">=": GreaterEquals, - "in": In, - "<": Less, - "<=": LessEquals, - "%": Modulo, - "*": Multiply, - "!=": NotEquals, - "-": Subtract, - } - reverseOperators = map[string]string{ - Add: "+", - Divide: "/", - Equals: "==", - Greater: ">", - GreaterEquals: ">=", - In: "in", - Less: "<", - LessEquals: "<=", - LogicalAnd: "&&", - LogicalNot: "!", - LogicalOr: "||", - Modulo: "%", - Multiply: "*", - Negate: "-", - NotEquals: "!=", - OldIn: "in", - Subtract: "-", - } - // precedence of the operator, where the higher value means higher. - precedence = map[string]int{ - Conditional: 8, - LogicalOr: 7, - LogicalAnd: 6, - Equals: 5, - Greater: 5, - GreaterEquals: 5, - In: 5, - Less: 5, - LessEquals: 5, - NotEquals: 5, - OldIn: 5, - Add: 4, - Subtract: 4, - Divide: 3, - Modulo: 3, - Multiply: 3, - LogicalNot: 2, - Negate: 2, - Index: 1, - } -) - -// Find the internal function name for an operator, if the input text is one. -func Find(text string) (string, bool) { - op, found := operators[text] - return op, found -} - -// FindReverse returns the unmangled, text representation of the operator. -func FindReverse(op string) (string, bool) { - txt, found := reverseOperators[op] - return txt, found -} - -// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator. -func FindReverseBinaryOperator(op string) (string, bool) { - if op == LogicalNot || op == Negate { - return "", false - } - txt, found := reverseOperators[op] - return txt, found -} - -// Precedence returns the operator precedence, where the higher the number indicates -// higher precedence operations. 
-func Precedence(op string) int { - p, found := precedence[op] - if found { - return p - } - return 0 -} diff --git a/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel deleted file mode 100644 index 144d93f0ba..0000000000 --- a/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "overloads.go", - ], - importpath = "github.com/google/cel-go/common/overloads", -) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/overloads/overloads.go b/vendor/github.com/google/cel-go/common/overloads/overloads.go deleted file mode 100644 index a1c01ee725..0000000000 --- a/vendor/github.com/google/cel-go/common/overloads/overloads.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package overloads defines the internal overload identifiers for function and -// operator overloads. -package overloads - -// Boolean logic overloads -const ( - Conditional = "conditional" - LogicalAnd = "logical_and" - LogicalOr = "logical_or" - LogicalNot = "logical_not" - NotStrictlyFalse = "not_strictly_false" - Equals = "equals" - NotEquals = "not_equals" - LessBool = "less_bool" - LessInt64 = "less_int64" - LessUint64 = "less_uint64" - LessDouble = "less_double" - LessString = "less_string" - LessBytes = "less_bytes" - LessTimestamp = "less_timestamp" - LessDuration = "less_duration" - LessEqualsBool = "less_equals_bool" - LessEqualsInt64 = "less_equals_int64" - LessEqualsUint64 = "less_equals_uint64" - LessEqualsDouble = "less_equals_double" - LessEqualsString = "less_equals_string" - LessEqualsBytes = "less_equals_bytes" - LessEqualsTimestamp = "less_equals_timestamp" - LessEqualsDuration = "less_equals_duration" - GreaterBool = "greater_bool" - GreaterInt64 = "greater_int64" - GreaterUint64 = "greater_uint64" - GreaterDouble = "greater_double" - GreaterString = "greater_string" - GreaterBytes = "greater_bytes" - GreaterTimestamp = "greater_timestamp" - GreaterDuration = "greater_duration" - GreaterEqualsBool = "greater_equals_bool" - GreaterEqualsInt64 = "greater_equals_int64" - GreaterEqualsUint64 = "greater_equals_uint64" - GreaterEqualsDouble = "greater_equals_double" - GreaterEqualsString = "greater_equals_string" - GreaterEqualsBytes = "greater_equals_bytes" - GreaterEqualsTimestamp = "greater_equals_timestamp" - GreaterEqualsDuration = "greater_equals_duration" -) - -// Math overloads -const ( - AddInt64 = "add_int64" - AddUint64 = "add_uint64" - AddDouble = "add_double" - AddString = "add_string" - AddBytes = "add_bytes" - AddList = "add_list" - AddTimestampDuration = "add_timestamp_duration" - AddDurationTimestamp = "add_duration_timestamp" - AddDurationDuration = "add_duration_duration" - 
SubtractInt64 = "subtract_int64" - SubtractUint64 = "subtract_uint64" - SubtractDouble = "subtract_double" - SubtractTimestampTimestamp = "subtract_timestamp_timestamp" - SubtractTimestampDuration = "subtract_timestamp_duration" - SubtractDurationDuration = "subtract_duration_duration" - MultiplyInt64 = "multiply_int64" - MultiplyUint64 = "multiply_uint64" - MultiplyDouble = "multiply_double" - DivideInt64 = "divide_int64" - DivideUint64 = "divide_uint64" - DivideDouble = "divide_double" - ModuloInt64 = "modulo_int64" - ModuloUint64 = "modulo_uint64" - NegateInt64 = "negate_int64" - NegateDouble = "negate_double" -) - -// Index overloads -const ( - IndexList = "index_list" - IndexMap = "index_map" - IndexMessage = "index_message" // TODO: introduce concept of types.Message -) - -// In operators -const ( - DeprecatedIn = "in" - InList = "in_list" - InMap = "in_map" - InMessage = "in_message" // TODO: introduce concept of types.Message -) - -// Size overloads -const ( - Size = "size" - SizeString = "size_string" - SizeBytes = "size_bytes" - SizeList = "size_list" - SizeMap = "size_map" - SizeStringInst = "string_size" - SizeBytesInst = "bytes_size" - SizeListInst = "list_size" - SizeMapInst = "map_size" -) - -// String function names. -const ( - Contains = "contains" - EndsWith = "endsWith" - Matches = "matches" - StartsWith = "startsWith" -) - -// String function overload names. -const ( - ContainsString = "contains_string" - EndsWithString = "ends_with_string" - MatchesString = "matches_string" - StartsWithString = "starts_with_string" -) - -// Time-based functions. -const ( - TimeGetFullYear = "getFullYear" - TimeGetMonth = "getMonth" - TimeGetDayOfYear = "getDayOfYear" - TimeGetDate = "getDate" - TimeGetDayOfMonth = "getDayOfMonth" - TimeGetDayOfWeek = "getDayOfWeek" - TimeGetHours = "getHours" - TimeGetMinutes = "getMinutes" - TimeGetSeconds = "getSeconds" - TimeGetMilliseconds = "getMilliseconds" -) - -// Timestamp overloads for time functions without timezones. -const ( - TimestampToYear = "timestamp_to_year" - TimestampToMonth = "timestamp_to_month" - TimestampToDayOfYear = "timestamp_to_day_of_year" - TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month" - TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based" - TimestampToDayOfWeek = "timestamp_to_day_of_week" - TimestampToHours = "timestamp_to_hours" - TimestampToMinutes = "timestamp_to_minutes" - TimestampToSeconds = "timestamp_to_seconds" - TimestampToMilliseconds = "timestamp_to_milliseconds" -) - -// Timestamp overloads for time functions with timezones. -const ( - TimestampToYearWithTz = "timestamp_to_year_with_tz" - TimestampToMonthWithTz = "timestamp_to_month_with_tz" - TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz" - TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz" - TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz" - TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz" - TimestampToHoursWithTz = "timestamp_to_hours_with_tz" - TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz" - TimestampToSecondsWithTz = "timestamp_to_seconds_tz" - TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz" -) - -// Duration overloads for time functions. 
-const ( - DurationToHours = "duration_to_hours" - DurationToMinutes = "duration_to_minutes" - DurationToSeconds = "duration_to_seconds" - DurationToMilliseconds = "duration_to_milliseconds" -) - -// Type conversion methods and overloads -const ( - TypeConvertInt = "int" - TypeConvertUint = "uint" - TypeConvertDouble = "double" - TypeConvertBool = "bool" - TypeConvertString = "string" - TypeConvertBytes = "bytes" - TypeConvertTimestamp = "timestamp" - TypeConvertDuration = "duration" - TypeConvertType = "type" - TypeConvertDyn = "dyn" -) - -// Int conversion functions. -const ( - IntToInt = "int64_to_int64" - UintToInt = "uint64_to_int64" - DoubleToInt = "double_to_int64" - StringToInt = "string_to_int64" - TimestampToInt = "timestamp_to_int64" - DurationToInt = "duration_to_int64" -) - -// Uint conversion functions. -const ( - UintToUint = "uint64_to_uint64" - IntToUint = "int64_to_uint64" - DoubleToUint = "double_to_uint64" - StringToUint = "string_to_uint64" -) - -// Double conversion functions. -const ( - DoubleToDouble = "double_to_double" - IntToDouble = "int64_to_double" - UintToDouble = "uint64_to_double" - StringToDouble = "string_to_double" -) - -// Bool conversion functions. -const ( - BoolToBool = "bool_to_bool" - StringToBool = "string_to_bool" -) - -// Bytes conversion functions. -const ( - BytesToBytes = "bytes_to_bytes" - StringToBytes = "string_to_bytes" -) - -// String conversion functions. -const ( - StringToString = "string_to_string" - BoolToString = "bool_to_string" - IntToString = "int64_to_string" - UintToString = "uint64_to_string" - DoubleToString = "double_to_string" - BytesToString = "bytes_to_string" - TimestampToString = "timestamp_to_string" - DurationToString = "duration_to_string" -) - -// Timestamp conversion functions -const ( - TimestampToTimestamp = "timestamp_to_timestamp" - StringToTimestamp = "string_to_timestamp" - IntToTimestamp = "int64_to_timestamp" -) - -// Convert duration from string -const ( - DurationToDuration = "duration_to_duration" - StringToDuration = "string_to_duration" - IntToDuration = "int64_to_duration" -) - -// Convert to dyn -const ( - ToDyn = "to_dyn" -) - -// Comprehensions helper methods, not directly accessible via a developer. -const ( - Iterator = "@iterator" - HasNext = "@hasNext" - Next = "@next" -) - -// IsTypeConversionFunction returns whether the input function is a standard library type -// conversion function. -func IsTypeConversionFunction(function string) bool { - switch function { - case TypeConvertBool, - TypeConvertBytes, - TypeConvertDouble, - TypeConvertDuration, - TypeConvertDyn, - TypeConvertInt, - TypeConvertString, - TypeConvertTimestamp, - TypeConvertType, - TypeConvertUint: - return true - default: - return false - } -} diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go deleted file mode 100644 index d4fa4bbfc0..0000000000 --- a/vendor/github.com/google/cel-go/common/source.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "strings" - "unicode/utf8" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// Source interface for filter source contents. -type Source interface { - // Content returns the source content represented as a string. - // Examples contents are the single file contents, textbox field, - // or url parameter. - Content() string - - // Description gives a brief description of the source. - // Example descriptions are a file name or ui element. - Description() string - - // LineOffsets gives the character offsets at which lines occur. - // The zero-th entry should refer to the break between the first - // and second line, or EOF if there is only one line of source. - LineOffsets() []int32 - - // LocationOffset translates a Location to an offset. - // Given the line and column of the Location returns the - // Location's character offset in the Source, and a bool - // indicating whether the Location was found. - LocationOffset(location Location) (int32, bool) - - // OffsetLocation translates a character offset to a Location, or - // false if the conversion was not feasible. - OffsetLocation(offset int32) (Location, bool) - - // NewLocation takes an input line and column and produces a Location. - // The default behavior is to treat the line and column as absolute, - // but concrete derivations may use this method to convert a relative - // line and column position into an absolute location. - NewLocation(line, col int) Location - - // Snippet returns a line of content and whether the line was found. - Snippet(line int) (string, bool) -} - -// The sourceImpl type implementation of the Source interface. -type sourceImpl struct { - contents []rune - description string - lineOffsets []int32 - idOffsets map[int64]int32 -} - -// TODO(jimlarson) "Character offsets" should index the code points -// within the UTF-8 encoded string. It currently indexes bytes. -// Can be accomplished by using rune[] instead of string for contents. - -// NewTextSource creates a new Source from the input text string. -func NewTextSource(text string) Source { - return NewStringSource(text, "") -} - -// NewStringSource creates a new Source from the given contents and description. -func NewStringSource(contents string, description string) Source { - // Compute line offsets up front as they are referred to frequently. - lines := strings.Split(contents, "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset - } - return &sourceImpl{ - contents: []rune(contents), - description: description, - lineOffsets: offsets, - idOffsets: map[int64]int32{}, - } -} - -// NewInfoSource creates a new Source from a SourceInfo. -func NewInfoSource(info *exprpb.SourceInfo) Source { - return &sourceImpl{ - contents: []rune(""), - description: info.Location, - lineOffsets: info.LineOffsets, - idOffsets: info.Positions, - } -} - -// Content implements the Source interface method. -func (s *sourceImpl) Content() string { - return string(s.contents) -} - -// Description implements the Source interface method. -func (s *sourceImpl) Description() string { - return s.description -} - -// LineOffsets implements the Source interface method. -func (s *sourceImpl) LineOffsets() []int32 { - return s.lineOffsets -} - -// LocationOffset implements the Source interface method. 
-func (s *sourceImpl) LocationOffset(location Location) (int32, bool) { - if lineOffset, found := s.findLineOffset(location.Line()); found { - return lineOffset + int32(location.Column()), true - } - return -1, false -} - -// NewLocation implements the Source interface method. -func (s *sourceImpl) NewLocation(line, col int) Location { - return NewLocation(line, col) -} - -// OffsetLocation implements the Source interface method. -func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) { - line, lineOffset := s.findLine(offset) - return NewLocation(int(line), int(offset-lineOffset)), true -} - -// Snippet implements the Source interface method. -func (s *sourceImpl) Snippet(line int) (string, bool) { - charStart, found := s.findLineOffset(line) - if !found || len(s.contents) == 0 { - return "", false - } - charEnd, found := s.findLineOffset(line + 1) - if found { - return string(s.contents[charStart : charEnd-1]), true - } - return string(s.contents[charStart:]), true -} - -// findLineOffset returns the offset where the (1-indexed) line begins, -// or false if line doesn't exist. -func (s *sourceImpl) findLineOffset(line int) (int32, bool) { - if line == 1 { - return 0, true - } - if line > 1 && line <= int(len(s.lineOffsets)) { - offset := s.lineOffsets[line-2] - return offset, true - } - return -1, false -} - -// findLine finds the line that contains the given character offset and -// returns the line number and offset of the beginning of that line. -// Note that the last line is treated as if it contains all offsets -// beyond the end of the actual source. -func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { - var line int32 = 1 - for _, lineOffset := range s.lineOffsets { - if lineOffset > characterOffset { - break - } else { - line++ - } - } - if line == 1 { - return line, 0 - } - return line, s.lineOffsets[line-2] -} diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel deleted file mode 100644 index e78b31738d..0000000000 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ /dev/null @@ -1,83 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "any_value.go", - "bool.go", - "bytes.go", - "double.go", - "duration.go", - "err.go", - "int.go", - "iterator.go", - "json_value.go", - "list.go", - "map.go", - "null.go", - "object.go", - "provider.go", - "string.go", - "timestamp.go", - "type.go", - "uint.go", - "unknown.go", - "util.go", - ], - importpath = "github.com/google/cel-go/common/types", - deps = [ - "//common/overloads:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/pb:go_default_library", - "//common/types/traits:go_default_library", - "@com_github_stoewer_go_strcase//:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//encoding/protojson:go_default_library", - "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", - "@org_golang_google_protobuf//types/known/anypb:go_default_library", - "@org_golang_google_protobuf//types/known/durationpb:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - 
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library", - "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "bool_test.go", - "bytes_test.go", - "double_test.go", - "duration_test.go", - "int_test.go", - "json_list_test.go", - "json_struct_test.go", - "list_test.go", - "map_test.go", - "null_test.go", - "object_test.go", - "provider_test.go", - "string_test.go", - "timestamp_test.go", - "type_test.go", - "uint_test.go", - ], - size = "small", - embed = [":go_default_library"], - deps = [ - "//common/types/ref:go_default_library", - "//test:go_default_library", - "//test/proto3pb:test_all_types_go_proto", - "@org_golang_google_protobuf//encoding/protojson:go_default_library", - "@org_golang_google_protobuf//types/known/anypb:go_default_library", - "@org_golang_google_protobuf//types/known/durationpb:go_default_library", - "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/types/any_value.go b/vendor/github.com/google/cel-go/common/types/any_value.go deleted file mode 100644 index cda0f13acf..0000000000 --- a/vendor/github.com/google/cel-go/common/types/any_value.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - - anypb "google.golang.org/protobuf/types/known/anypb" -) - -// anyValueType constant representing the reflected type of google.protobuf.Any. -var anyValueType = reflect.TypeOf(&anypb.Any{}) diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go deleted file mode 100644 index 16214c2578..0000000000 --- a/vendor/github.com/google/cel-go/common/types/bool.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - "strconv" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// Bool type that implements ref.Val and supports comparison and negation. -type Bool bool - -var ( - // BoolType singleton. 
- BoolType = NewTypeValue("bool", - traits.ComparerType, - traits.NegatorType) - - // boolWrapperType golang reflected type for protobuf bool wrapper type. - boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{}) -) - -// Boolean constants -var ( - False = Bool(false) - True = Bool(true) -) - -// Compare implements the traits.Comparer interface method. -func (b Bool) Compare(other ref.Val) ref.Val { - otherBool, ok := other.(Bool) - if !ok { - return ValOrErr(other, "no such overload") - } - if b == otherBool { - return IntZero - } - if !b && otherBool { - return IntNegOne - } - return IntOne -} - -// ConvertToNative implements the ref.Val interface method. -func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.Bool: - return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any. - return anypb.New(wrapperspb.Bool(bool(b))) - case boolWrapperType: - // Convert the bool to a wrapperspb.BoolValue. - return wrapperspb.Bool(bool(b)), nil - case jsonValueType: - // Return the bool as a new structpb.Value. - return structpb.NewBoolValue(bool(b)), nil - default: - if typeDesc.Elem().Kind() == reflect.Bool { - p := bool(b) - return &p, nil - } - } - case reflect.Interface: - bv := b.Value() - if reflect.TypeOf(bv).Implements(typeDesc) { - return bv, nil - } - if reflect.TypeOf(b).Implements(typeDesc) { - return b, nil - } - } - return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc) -} - -// ConvertToType implements the ref.Val interface method. -func (b Bool) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case StringType: - return String(strconv.FormatBool(bool(b))) - case BoolType: - return b - case TypeType: - return BoolType - } - return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal) -} - -// Equal implements the ref.Val interface method. -func (b Bool) Equal(other ref.Val) ref.Val { - otherBool, ok := other.(Bool) - if !ok { - return ValOrErr(other, "no such overload") - } - return Bool(b == otherBool) -} - -// Negate implements the traits.Negater interface method. -func (b Bool) Negate() ref.Val { - return !b -} - -// Type implements the ref.Val interface method. -func (b Bool) Type() ref.Type { - return BoolType -} - -// Value implements the ref.Val interface method. -func (b Bool) Value() interface{} { - return bool(b) -} - -// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType. -func IsBool(elem interface{}) bool { - switch elem := elem.(type) { - case ref.Type: - return elem == BoolType - case ref.Val: - return IsBool(elem.Type()) - } - return false -} diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go deleted file mode 100644 index 5ef367f023..0000000000 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "bytes" - "encoding/base64" - "fmt" - "reflect" - "unicode/utf8" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// Bytes type that implements ref.Val and supports add, compare, and size -// operations. -type Bytes []byte - -var ( - // BytesType singleton. - BytesType = NewTypeValue("bytes", - traits.AdderType, - traits.ComparerType, - traits.SizerType) - - // byteWrapperType golang reflected type for protobuf bytes wrapper type. - byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{}) -) - -// Add implements traits.Adder interface method by concatenating byte sequences. -func (b Bytes) Add(other ref.Val) ref.Val { - otherBytes, ok := other.(Bytes) - if !ok { - return ValOrErr(other, "no such overload") - } - return append(b, otherBytes...) -} - -// Compare implments traits.Comparer interface method by lexicographic ordering. -func (b Bytes) Compare(other ref.Val) ref.Val { - otherBytes, ok := other.(Bytes) - if !ok { - return ValOrErr(other, "no such overload") - } - return Int(bytes.Compare(b, otherBytes)) -} - -// ConvertToNative implements the ref.Val interface method. -func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.Array, reflect.Slice: - return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Primitives must be wrapped before being set on an Any field. - return anypb.New(wrapperspb.Bytes([]byte(b))) - case byteWrapperType: - // Convert the bytes to a wrapperspb.BytesValue. - return wrapperspb.Bytes([]byte(b)), nil - case jsonValueType: - // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64. - // The encoding below matches the golang 'encoding/json' behavior during marshaling, - // which uses base64.StdEncoding. - str := base64.StdEncoding.EncodeToString([]byte(b)) - return structpb.NewStringValue(str), nil - } - case reflect.Interface: - bv := b.Value() - if reflect.TypeOf(bv).Implements(typeDesc) { - return bv, nil - } - if reflect.TypeOf(b).Implements(typeDesc) { - return b, nil - } - } - return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc) -} - -// ConvertToType implements the ref.Val interface method. -func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case StringType: - if !utf8.Valid(b) { - return NewErr("invalid UTF-8 in bytes, cannot convert to string") - } - return String(b) - case BytesType: - return b - case TypeType: - return BytesType - } - return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal) -} - -// Equal implements the ref.Val interface method. 
-func (b Bytes) Equal(other ref.Val) ref.Val { - otherBytes, ok := other.(Bytes) - if !ok { - return ValOrErr(other, "no such overload") - } - return Bool(bytes.Equal(b, otherBytes)) -} - -// Size implements the traits.Sizer interface method. -func (b Bytes) Size() ref.Val { - return Int(len(b)) -} - -// Type implements the ref.Val interface method. -func (b Bytes) Type() ref.Type { - return BytesType -} - -// Value implements the ref.Val interface method. -func (b Bytes) Value() interface{} { - return []byte(b) -} diff --git a/vendor/github.com/google/cel-go/common/types/doc.go b/vendor/github.com/google/cel-go/common/types/doc.go deleted file mode 100644 index 5f641d7043..0000000000 --- a/vendor/github.com/google/cel-go/common/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package types contains the types, traits, and utilities common to all -// components of expression handling. -package types diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go deleted file mode 100644 index 54c66faa6d..0000000000 --- a/vendor/github.com/google/cel-go/common/types/double.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "math" - "reflect" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// Double type that implements ref.Val, comparison, and mathematical -// operations. -type Double float64 - -var ( - // DoubleType singleton. - DoubleType = NewTypeValue("double", - traits.AdderType, - traits.ComparerType, - traits.DividerType, - traits.MultiplierType, - traits.NegatorType, - traits.SubtractorType) - - // doubleWrapperType reflected type for protobuf double wrapper type. - doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{}) - - // floatWrapperType reflected type for protobuf float wrapper type. - floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{}) -) - -// Add implements traits.Adder.Add. 
-func (d Double) Add(other ref.Val) ref.Val { - otherDouble, ok := other.(Double) - if !ok { - return ValOrErr(other, "no such overload") - } - return d + otherDouble -} - -// Compare implements traits.Comparer.Compare. -func (d Double) Compare(other ref.Val) ref.Val { - otherDouble, ok := other.(Double) - if !ok { - return ValOrErr(other, "no such overload") - } - if d < otherDouble { - return IntNegOne - } - if d > otherDouble { - return IntOne - } - return IntZero -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.Float32: - v := float32(d) - return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil - case reflect.Float64: - v := float64(d) - return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Primitives must be wrapped before being set on an Any field. - return anypb.New(wrapperspb.Double(float64(d))) - case doubleWrapperType: - // Convert to a wrapperspb.DoubleValue - return wrapperspb.Double(float64(d)), nil - case floatWrapperType: - // Convert to a wrapperspb.FloatValue (with truncation). - return wrapperspb.Float(float32(d)), nil - case jsonValueType: - // Note, there are special cases for proto3 to json conversion that - // expect the floating point value to be converted to a NaN, - // Infinity, or -Infinity string values, but the jsonpb string - // marshaling of the protobuf.Value will handle this conversion. - return structpb.NewNumberValue(float64(d)), nil - } - switch typeDesc.Elem().Kind() { - case reflect.Float32: - v := float32(d) - p := reflect.New(typeDesc.Elem()) - p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) - return p.Interface(), nil - case reflect.Float64: - v := float64(d) - p := reflect.New(typeDesc.Elem()) - p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) - return p.Interface(), nil - } - case reflect.Interface: - dv := d.Value() - if reflect.TypeOf(dv).Implements(typeDesc) { - return dv, nil - } - if reflect.TypeOf(d).Implements(typeDesc) { - return d, nil - } - } - return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (d Double) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case IntType: - i := math.Round(float64(d)) - if i > math.MaxInt64 || i < math.MinInt64 { - return NewErr("range error converting %g to int", float64(d)) - } - return Int(float64(i)) - case UintType: - i := math.Round(float64(d)) - if i > math.MaxUint64 || i < 0 { - return NewErr("range error converting %g to int", float64(d)) - } - return Uint(float64(i)) - case DoubleType: - return d - case StringType: - return String(fmt.Sprintf("%g", float64(d))) - case TypeType: - return DoubleType - } - return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal) -} - -// Divide implements traits.Divider.Divide. -func (d Double) Divide(other ref.Val) ref.Val { - otherDouble, ok := other.(Double) - if !ok { - return ValOrErr(other, "no such overload") - } - return d / otherDouble -} - -// Equal implements ref.Val.Equal. -func (d Double) Equal(other ref.Val) ref.Val { - otherDouble, ok := other.(Double) - if !ok { - return ValOrErr(other, "no such overload") - } - // TODO: Handle NaNs properly. - return Bool(d == otherDouble) -} - -// Multiply implements traits.Multiplier.Multiply. 
-func (d Double) Multiply(other ref.Val) ref.Val { - otherDouble, ok := other.(Double) - if !ok { - return ValOrErr(other, "no such overload") - } - return d * otherDouble -} - -// Negate implements traits.Negater.Negate. -func (d Double) Negate() ref.Val { - return -d -} - -// Subtract implements traits.Subtractor.Subtract. -func (d Double) Subtract(subtrahend ref.Val) ref.Val { - subtraDouble, ok := subtrahend.(Double) - if !ok { - return ValOrErr(subtrahend, "no such overload") - } - return d - subtraDouble -} - -// Type implements ref.Val.Type. -func (d Double) Type() ref.Type { - return DoubleType -} - -// Value implements ref.Val.Value. -func (d Double) Value() interface{} { - return float64(d) -} diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go deleted file mode 100644 index 5a14b024e3..0000000000 --- a/vendor/github.com/google/cel-go/common/types/duration.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - "strconv" - "time" - - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - dpb "google.golang.org/protobuf/types/known/durationpb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -// Duration type that implements ref.Val and supports add, compare, negate, -// and subtract operators. This type is also a receiver which means it can -// participate in dispatch to receiver functions. -type Duration struct { - time.Duration -} - -var ( - // DurationType singleton. - DurationType = NewTypeValue("google.protobuf.Duration", - traits.AdderType, - traits.ComparerType, - traits.NegatorType, - traits.ReceiverType, - traits.SubtractorType) -) - -// Add implements traits.Adder.Add. -func (d Duration) Add(other ref.Val) ref.Val { - switch other.Type() { - case DurationType: - dur2 := other.(Duration) - return Duration{Duration: d.Duration + dur2.Duration} - case TimestampType: - ts := other.(Timestamp).Time - return Timestamp{Time: ts.Add(d.Duration)} - } - return ValOrErr(other, "no such overload") -} - -// Compare implements traits.Comparer.Compare. -func (d Duration) Compare(other ref.Val) ref.Val { - otherDur, ok := other.(Duration) - if !ok { - return ValOrErr(other, "no such overload") - } - dur := d.Duration - otherDur.Duration - if dur < 0 { - return IntNegOne - } - if dur > 0 { - return IntOne - } - return IntZero -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - // If the duration is already assignable to the desired type return it. 
- if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) { - return d.Duration, nil - } - if reflect.TypeOf(d).AssignableTo(typeDesc) { - return d, nil - } - switch typeDesc { - case anyValueType: - // Pack the duration as a dpb.Duration into an Any value. - return anypb.New(dpb.New(d.Duration)) - case durationValueType: - // Unwrap the CEL value to its underlying proto value. - return dpb.New(d.Duration), nil - case jsonValueType: - // CEL follows the proto3 to JSON conversion. - // Note, using jsonpb would wrap the result in extra double quotes. - v := d.ConvertToType(StringType) - if IsError(v) { - return nil, v.(*Err) - } - return structpb.NewStringValue(string(v.(String))), nil - } - return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (d Duration) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case StringType: - return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s") - case IntType: - return Int(d.Duration) - case DurationType: - return d - case TypeType: - return DurationType - } - return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal) -} - -// Equal implements ref.Val.Equal. -func (d Duration) Equal(other ref.Val) ref.Val { - otherDur, ok := other.(Duration) - if !ok { - return ValOrErr(other, "no such overload") - } - return Bool(d.Duration == otherDur.Duration) -} - -// Negate implements traits.Negater.Negate. -func (d Duration) Negate() ref.Val { - return Duration{Duration: -d.Duration} -} - -// Receive implements traits.Receiver.Receive. -func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val { - if len(args) == 0 { - if f, found := durationZeroArgOverloads[function]; found { - return f(d.Duration) - } - } - return NewErr("no such overload") -} - -// Subtract implements traits.Subtractor.Subtract. -func (d Duration) Subtract(subtrahend ref.Val) ref.Val { - subtraDur, ok := subtrahend.(Duration) - if !ok { - return ValOrErr(subtrahend, "no such overload") - } - return d.Add(subtraDur.Negate()) -} - -// Type implements ref.Val.Type. -func (d Duration) Type() ref.Type { - return DurationType -} - -// Value implements ref.Val.Value. -func (d Duration) Value() interface{} { - return d.Duration -} - -var ( - durationValueType = reflect.TypeOf(&dpb.Duration{}) - - durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{ - overloads.TimeGetHours: func(dur time.Duration) ref.Val { - return Int(dur.Hours()) - }, - overloads.TimeGetMinutes: func(dur time.Duration) ref.Val { - return Int(dur.Minutes()) - }, - overloads.TimeGetSeconds: func(dur time.Duration) ref.Val { - return Int(dur.Seconds()) - }, - overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val { - return Int(dur.Milliseconds()) - }} -) diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go deleted file mode 100644 index 5cccb53d00..0000000000 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/ref" -) - -// Err type which extends the built-in go error and implements ref.Val. -type Err struct { - error -} - -var ( - // ErrType singleton. - ErrType = NewTypeValue("error") -) - -// NewErr creates a new Err described by the format string and args. -// TODO: Audit the use of this function and standardize the error messages and codes. -func NewErr(format string, args ...interface{}) ref.Val { - return &Err{fmt.Errorf(format, args...)} -} - -// NoSuchOverloadErr returns a new types.Err instance with a no such overload message. -func NoSuchOverloadErr() ref.Val { - return NewErr("no such overload") -} - -// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion -// message that indicates that the native value could not be converted to a CEL ref.Val. -func UnsupportedRefValConversionErr(val interface{}) ref.Val { - return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val) -} - -// MaybeNoSuchOverloadErr returns the error or unknown if the input ref.Val is one of these types, -// else a new no such overload error. -func MaybeNoSuchOverloadErr(val ref.Val) ref.Val { - return ValOrErr(val, "no such overload") -} - -// ValOrErr either returns the existing error or create a new one. -// TODO: Audit the use of this function and standardize the error messages and codes. -func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val { - if val == nil { - return NewErr(format, args...) - } - switch val.Type() { - case ErrType, UnknownType: - return val - default: - return NewErr(format, args...) - } -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - return nil, e.error -} - -// ConvertToType implements ref.Val.ConvertToType. -func (e *Err) ConvertToType(typeVal ref.Type) ref.Val { - // Errors are not convertible to other representations. - return e -} - -// Equal implements ref.Val.Equal. -func (e *Err) Equal(other ref.Val) ref.Val { - // An error cannot be equal to any other value, so it returns itself. - return e -} - -// String implements fmt.Stringer. -func (e *Err) String() string { - return e.error.Error() -} - -// Type implements ref.Val.Type. -func (e *Err) Type() ref.Type { - return ErrType -} - -// Value implements ref.Val.Value. -func (e *Err) Value() interface{} { - return e.error -} - -// IsError returns whether the input element ref.Type or ref.Val is equal to -// the ErrType singleton. -func IsError(val ref.Val) bool { - return val.Type() == ErrType -} diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go deleted file mode 100644 index 4b76ce265f..0000000000 --- a/vendor/github.com/google/cel-go/common/types/int.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "math" - "reflect" - "strconv" - "time" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// Int type that implements ref.Val as well as comparison and math operators. -type Int int64 - -// Int constants used for comparison results. -const ( - // IntZero is the zero-value for Int - IntZero = Int(0) - IntOne = Int(1) - IntNegOne = Int(-1) -) - -var ( - // IntType singleton. - IntType = NewTypeValue("int", - traits.AdderType, - traits.ComparerType, - traits.DividerType, - traits.ModderType, - traits.MultiplierType, - traits.NegatorType, - traits.SubtractorType) - - // int32WrapperType reflected type for protobuf int32 wrapper type. - int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{}) - - // int64WrapperType reflected type for protobuf int64 wrapper type. - int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{}) -) - -// Add implements traits.Adder.Add. -func (i Int) Add(other ref.Val) ref.Val { - otherInt, ok := other.(Int) - if !ok { - return ValOrErr(other, "no such overload") - } - if (otherInt > 0 && i > math.MaxInt64-otherInt) || (otherInt < 0 && i < math.MinInt64-otherInt) { - return NewErr("integer overflow") - } - return i + otherInt -} - -// Compare implements traits.Comparer.Compare. -func (i Int) Compare(other ref.Val) ref.Val { - otherInt, ok := other.(Int) - if !ok { - return ValOrErr(other, "no such overload") - } - if i < otherInt { - return IntNegOne - } - if i > otherInt { - return IntOne - } - return IntZero -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.Int, reflect.Int32, reflect.Int64: - // Enums are also mapped as int32 derivations. - // Note, the code doesn't convert to the enum value directly since this is not known, but - // the net effect with respect to proto-assignment is handled correctly by the reflection - // Convert method. - return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Primitives must be wrapped before being set on an Any field. - return anypb.New(wrapperspb.Int64(int64(i))) - case int32WrapperType: - // Convert the value to a wrapperspb.Int32Value (with truncation). - return wrapperspb.Int32(int32(i)), nil - case int64WrapperType: - // Convert the value to a wrapperspb.Int64Value. - return wrapperspb.Int64(int64(i)), nil - case jsonValueType: - // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON - // decimal strings. Because CEL ints might come from the automatic widening of 32-bit - // values in protos, the JSON type is chosen dynamically based on the value. - // - // - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers. - // - Integers outside this range are encoded as JSON strings. 
- // - // The integer to float range represents the largest interval where such a conversion - // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON - // number as with protobuf. Those consuming JSON from a 64-bit source must be able to - // handle either a JSON number or a JSON decimal string. To handle these cases safely - // the string values must be explicitly converted to int() within a CEL expression; - // however, it is best to simply stay within the JSON number range when building JSON - // objects in CEL. - if i.isJSONSafe() { - return structpb.NewNumberValue(float64(i)), nil - } - // Proto3 to JSON conversion requires string-formatted int64 values - // since the conversion to floating point would result in truncation. - return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil - } - switch typeDesc.Elem().Kind() { - case reflect.Int32: - v := int32(i) - p := reflect.New(typeDesc.Elem()) - p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) - return p.Interface(), nil - case reflect.Int64: - v := int64(i) - p := reflect.New(typeDesc.Elem()) - p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) - return p.Interface(), nil - } - case reflect.Interface: - iv := i.Value() - if reflect.TypeOf(iv).Implements(typeDesc) { - return iv, nil - } - if reflect.TypeOf(i).Implements(typeDesc) { - return i, nil - } - } - return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (i Int) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case IntType: - return i - case UintType: - if i < 0 { - return NewErr("range error converting %d to uint", i) - } - return Uint(i) - case DoubleType: - return Double(i) - case StringType: - return String(fmt.Sprintf("%d", int64(i))) - case TimestampType: - t := time.Unix(int64(i), 0).UTC() - return Timestamp{Time: t} - case TypeType: - return IntType - } - return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal) -} - -// Divide implements traits.Divider.Divide. -func (i Int) Divide(other ref.Val) ref.Val { - otherInt, ok := other.(Int) - if !ok { - return ValOrErr(other, "no such overload") - } - if otherInt == IntZero { - return NewErr("divide by zero") - } - // In twos complement, negating MinInt64 would result in a valid of MaxInt64+1. - if i == math.MinInt64 && otherInt == -1 { - return NewErr("integer overflow") - } - return i / otherInt -} - -// Equal implements ref.Val.Equal. -func (i Int) Equal(other ref.Val) ref.Val { - otherInt, ok := other.(Int) - if !ok { - return ValOrErr(other, "no such overload") - } - return Bool(i == otherInt) -} - -// Modulo implements traits.Modder.Modulo. -func (i Int) Modulo(other ref.Val) ref.Val { - otherInt, ok := other.(Int) - if !ok { - return ValOrErr(other, "no such overload") - } - if otherInt == IntZero { - return NewErr("modulus by zero") - } - // In twos complement, negating MinInt64 would result in a valid of MaxInt64+1. - if i == math.MinInt64 && otherInt == -1 { - return NewErr("integer overflow") - } - return i % otherInt -} - -// Multiply implements traits.Multiplier.Multiply. -func (i Int) Multiply(other ref.Val) ref.Val { - otherInt, ok := other.(Int) - if !ok { - return ValOrErr(other, "no such overload") - } - // Detecting multiplication overflow is more complicated than the others. The first two detect - // attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal - // overflow conditions. 
- if (i == -1 && otherInt == math.MinInt64) || (otherInt == -1 && i == math.MinInt64) || - (i > 0 && otherInt > 0 && i > math.MaxInt64/otherInt) || (i > 0 && otherInt < 0 && otherInt < math.MinInt64/i) || - (i < 0 && otherInt > 0 && i < math.MinInt64/otherInt) || (i < 0 && otherInt < 0 && otherInt < math.MaxInt64/i) { - return NewErr("integer overflow") - } - return i * otherInt -} - -// Negate implements traits.Negater.Negate. -func (i Int) Negate() ref.Val { - // In twos complement, negating MinInt64 would result in a valid of MaxInt64+1. - if i == math.MinInt64 { - return NewErr("integer overflow") - } - return -i -} - -// Subtract implements traits.Subtractor.Subtract. -func (i Int) Subtract(subtrahend ref.Val) ref.Val { - subtraInt, ok := subtrahend.(Int) - if !ok { - return ValOrErr(subtrahend, "no such overload") - } - if (subtraInt < 0 && i > math.MaxInt64+subtraInt) || (subtraInt > 0 && i < math.MinInt64+subtraInt) { - return NewErr("integer overflow") - } - return i - subtraInt -} - -// Type implements ref.Val.Type. -func (i Int) Type() ref.Type { - return IntType -} - -// Value implements ref.Val.Value. -func (i Int) Value() interface{} { - return int64(i) -} - -// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON. -func (i Int) isJSONSafe() bool { - return i >= minIntJSON && i <= maxIntJSON -} - -const ( - // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6. - maxIntJSON = 1<<53 - 1 - // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6. - minIntJSON = -maxIntJSON -) diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go deleted file mode 100644 index 4906627783..0000000000 --- a/vendor/github.com/google/cel-go/common/types/iterator.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -var ( - // IteratorType singleton. - IteratorType = NewTypeValue("iterator", traits.IteratorType) -) - -// baseIterator is the basis for list, map, and object iterators. -// -// An iterator in and of itself should not be a valid value for comparison, but must implement the -// `ref.Val` methods in order to be well-supported within instruction arguments processed by the -// interpreter. 
-type baseIterator struct{} - -func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - return nil, fmt.Errorf("type conversion on iterators not supported") -} - -func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val { - return NewErr("no such overload") -} - -func (*baseIterator) Equal(other ref.Val) ref.Val { - return NewErr("no such overload") -} - -func (*baseIterator) Type() ref.Type { - return IteratorType -} - -func (*baseIterator) Value() interface{} { - return nil -} diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go deleted file mode 100644 index cf75278844..0000000000 --- a/vendor/github.com/google/cel-go/common/types/json_value.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - - "google.golang.org/protobuf/types/known/structpb" -) - -// JSON type constants representing the reflected types of protobuf JSON values. -var ( - jsonValueType = reflect.TypeOf(&structpb.Value{}) - jsonListValueType = reflect.TypeOf(&structpb.ListValue{}) - jsonStructType = reflect.TypeOf(&structpb.Struct{}) -) diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go deleted file mode 100644 index fd577e95d7..0000000000 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -var ( - // ListType singleton. - ListType = NewTypeValue("list", - traits.AdderType, - traits.ContainerType, - traits.IndexerType, - traits.IterableType, - traits.SizerType) -) - -// NewDynamicList returns a traits.Lister with heterogenous elements. -// value should be an array of "native" types, i.e. any type that -// NativeToValue() can convert to a ref.Val. 
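// Usage sketch, not from the vendored source: NewDynamicList accepts any native
// slice and adapts each element through the supplied ref.TypeAdapter on access, so
// a heterogeneous []interface{} works as well as a typed slice.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	l := types.NewDynamicList(types.DefaultTypeAdapter, []interface{}{int64(1), "two", true})
	fmt.Println(l.Size())                        // 3
	fmt.Println(l.Get(types.Int(1)))             // two
	fmt.Println(l.Contains(types.String("two"))) // true
}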
-func NewDynamicList(adapter ref.TypeAdapter, value interface{}) traits.Lister { - refValue := reflect.ValueOf(value) - return &baseList{ - TypeAdapter: adapter, - value: value, - size: refValue.Len(), - get: func(i int) interface{} { - return refValue.Index(i).Interface() - }, - } -} - -// NewStringList returns a traits.Lister containing only strings. -func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister { - return &baseList{ - TypeAdapter: adapter, - value: elems, - size: len(elems), - get: func(i int) interface{} { return elems[i] }, - } -} - -// NewRefValList returns a traits.Lister with ref.Val elements. -// -// This type specialization is used with list literals within CEL expressions. -func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister { - return &baseList{ - TypeAdapter: adapter, - value: elems, - size: len(elems), - get: func(i int) interface{} { return elems[i] }, - } -} - -// NewProtoList returns a traits.Lister based on a pb.List instance. -func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister { - return &baseList{ - TypeAdapter: adapter, - value: list, - size: list.Len(), - get: func(i int) interface{} { return list.Get(i).Interface() }, - } -} - -// NewJSONList returns a traits.Lister based on structpb.ListValue instance. -func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister { - vals := l.GetValues() - return &baseList{ - TypeAdapter: adapter, - value: l, - size: len(vals), - get: func(i int) interface{} { return vals[i] }, - } -} - -// baseList points to a list containing elements of any type. -// The `value` is an array of native values, and refValue is its reflection object. -// The `ref.TypeAdapter` enables native type to CEL type conversions. -type baseList struct { - ref.TypeAdapter - value interface{} - - // size indicates the number of elements within the list. - // Since objects are immutable the size of a list is static. - size int - - // get returns a value at the specified integer index. - // The index is guaranteed to be checked against the list index range. - get func(int) interface{} -} - -// Add implements the traits.Adder interface method. -func (l *baseList) Add(other ref.Val) ref.Val { - otherList, ok := other.(traits.Lister) - if !ok { - return MaybeNoSuchOverloadErr(other) - } - if l.Size() == IntZero { - return other - } - if otherList.Size() == IntZero { - return l - } - return &concatList{ - TypeAdapter: l.TypeAdapter, - prevList: l, - nextList: otherList} -} - -// Contains implements the traits.Container interface method. -func (l *baseList) Contains(elem ref.Val) ref.Val { - if IsUnknownOrError(elem) { - return elem - } - var err ref.Val - for i := 0; i < l.size; i++ { - val := l.NativeToValue(l.get(i)) - cmp := elem.Equal(val) - b, ok := cmp.(Bool) - // When there is an error on the contain check, this is not necessarily terminal. - // The contains call could find the element and return True, just as though the user - // had written a per-element comparison in an exists() macro or logical ||, e.g. - // list.exists(e, e == elem) - if !ok && err == nil { - err = ValOrErr(cmp, "no such overload") - } - if b == True { - return True - } - } - if err != nil { - return err - } - return False -} - -// ConvertToNative implements the ref.Val interface method. -func (l *baseList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - // If the underlying list value is assignable to the reflected type return it. 
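// Conversion sketch, not from the vendored source, assuming string elements convert
// to *structpb.Value the same way the Int conversion above does: a CEL list can be
// handed back as a native Go slice, or as a protobuf ListValue when building JSON.

package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/common/types"
	structpb "google.golang.org/protobuf/types/known/structpb"
)

func main() {
	l := types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b"})

	// Native slice: the underlying []string is assignable, so it is returned directly.
	ss, _ := l.ConvertToNative(reflect.TypeOf([]string{}))
	fmt.Println(ss.([]string)) // [a b]

	// JSON form: each element is converted to a *structpb.Value.
	jv, _ := l.ConvertToNative(reflect.TypeOf(&structpb.ListValue{}))
	fmt.Println(jv.(*structpb.ListValue).GetValues()[0].GetStringValue()) // a
}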
- if reflect.TypeOf(l.value).AssignableTo(typeDesc) { - return l.value, nil - } - // If the list wrapper is assignable to the desired type return it. - if reflect.TypeOf(l).AssignableTo(typeDesc) { - return l, nil - } - // Attempt to convert the list to a set of well known protobuf types. - switch typeDesc { - case anyValueType: - json, err := l.ConvertToNative(jsonListValueType) - if err != nil { - return nil, err - } - return anypb.New(json.(proto.Message)) - case jsonValueType, jsonListValueType: - jsonValues, err := - l.ConvertToNative(reflect.TypeOf([]*structpb.Value{})) - if err != nil { - return nil, err - } - jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)} - if typeDesc == jsonListValueType { - return jsonList, nil - } - return structpb.NewListValue(jsonList), nil - } - // Non-list conversion. - if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array { - return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc) - } - - // List conversion. - // Allow the element ConvertToNative() function to determine whether conversion is possible. - otherElemType := typeDesc.Elem() - elemCount := l.size - nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount) - for i := 0; i < elemCount; i++ { - elem := l.NativeToValue(l.get(i)) - nativeElemVal, err := elem.ConvertToNative(otherElemType) - if err != nil { - return nil, err - } - nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal)) - } - return nativeList.Interface(), nil -} - -// ConvertToType implements the ref.Val interface method. -func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case ListType: - return l - case TypeType: - return ListType - } - return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal) -} - -// Equal implements the ref.Val interface method. -func (l *baseList) Equal(other ref.Val) ref.Val { - otherList, ok := other.(traits.Lister) - if !ok { - return MaybeNoSuchOverloadErr(other) - } - if l.Size() != otherList.Size() { - return False - } - for i := IntZero; i < l.Size().(Int); i++ { - thisElem := l.Get(i) - otherElem := otherList.Get(i) - elemEq := thisElem.Equal(otherElem) - if elemEq != True { - return elemEq - } - } - return True -} - -// Get implements the traits.Indexer interface method. -func (l *baseList) Get(index ref.Val) ref.Val { - i, ok := index.(Int) - if !ok { - return ValOrErr(index, "unsupported index type '%s' in list", index.Type()) - } - iv := int(i) - if iv < 0 || iv >= l.size { - return NewErr("index '%d' out of range in list size '%d'", i, l.Size()) - } - elem := l.get(iv) - return l.NativeToValue(elem) -} - -// Iterator implements the traits.Iterable interface method. -func (l *baseList) Iterator() traits.Iterator { - return newListIterator(l) -} - -// Size implements the traits.Sizer interface method. -func (l *baseList) Size() ref.Val { - return Int(l.size) -} - -// Type implements the ref.Val interface method. -func (l *baseList) Type() ref.Type { - return ListType -} - -// Value implements the ref.Val interface method. -func (l *baseList) Value() interface{} { - return l.value -} - -// concatList combines two list implementations together into a view. -// The `ref.TypeAdapter` enables native type to CEL type conversions. -type concatList struct { - ref.TypeAdapter - value interface{} - prevList traits.Lister - nextList traits.Lister -} - -// Add implements the traits.Adder interface method. 
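// Concatenation sketch, not from the vendored source: Add does not copy either
// operand; it returns a concatList view whose Get, Size, and Contains span both
// halves.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

func main() {
	a := types.NewStringList(types.DefaultTypeAdapter, []string{"x"})
	b := types.NewStringList(types.DefaultTypeAdapter, []string{"y", "z"})
	c := a.Add(b).(traits.Lister) // lazy view over both lists
	fmt.Println(c.Size())                       // 3
	fmt.Println(c.Get(types.Int(2)))            // z
	fmt.Println(c.Contains(types.String("y"))) // true
}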
-func (l *concatList) Add(other ref.Val) ref.Val { - otherList, ok := other.(traits.Lister) - if !ok { - return MaybeNoSuchOverloadErr(other) - } - if l.Size() == IntZero { - return other - } - if otherList.Size() == IntZero { - return l - } - return &concatList{ - TypeAdapter: l.TypeAdapter, - prevList: l, - nextList: otherList} -} - -// Contains implments the traits.Container interface method. -func (l *concatList) Contains(elem ref.Val) ref.Val { - // The concat list relies on the IsErrorOrUnknown checks against the input element to be - // performed by the `prevList` and/or `nextList`. - prev := l.prevList.Contains(elem) - // Short-circuit the return if the elem was found in the prev list. - if prev == True { - return prev - } - // Return if the elem was found in the next list. - next := l.nextList.Contains(elem) - if next == True { - return next - } - // Handle the case where an error or unknown was encountered before checking next. - if IsUnknownOrError(prev) { - return prev - } - // Otherwise, rely on the next value as the representative result. - return next -} - -// ConvertToNative implements the ref.Val interface method. -func (l *concatList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - combined := NewDynamicList(l.TypeAdapter, l.Value().([]interface{})) - return combined.ConvertToNative(typeDesc) -} - -// ConvertToType implements the ref.Val interface method. -func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case ListType: - return l - case TypeType: - return ListType - } - return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal) -} - -// Equal implements the ref.Val interface method. -func (l *concatList) Equal(other ref.Val) ref.Val { - otherList, ok := other.(traits.Lister) - if !ok { - return MaybeNoSuchOverloadErr(other) - } - if l.Size() != otherList.Size() { - return False - } - for i := IntZero; i < l.Size().(Int); i++ { - thisElem := l.Get(i) - otherElem := otherList.Get(i) - elemEq := thisElem.Equal(otherElem) - if elemEq != True { - return elemEq - } - } - return True -} - -// Get implements the traits.Indexer interface method. -func (l *concatList) Get(index ref.Val) ref.Val { - i, ok := index.(Int) - if !ok { - return MaybeNoSuchOverloadErr(index) - } - if i < l.prevList.Size().(Int) { - return l.prevList.Get(i) - } - offset := i - l.prevList.Size().(Int) - return l.nextList.Get(offset) -} - -// Iterator implements the traits.Iterable interface method. -func (l *concatList) Iterator() traits.Iterator { - return newListIterator(l) -} - -// Size implements the traits.Sizer interface method. -func (l *concatList) Size() ref.Val { - return l.prevList.Size().(Int).Add(l.nextList.Size()) -} - -// Type implements the ref.Val interface method. -func (l *concatList) Type() ref.Type { - return ListType -} - -// Value implements the ref.Val interface method. 
-func (l *concatList) Value() interface{} { - if l.value == nil { - merged := make([]interface{}, l.Size().(Int)) - prevLen := l.prevList.Size().(Int) - for i := Int(0); i < prevLen; i++ { - merged[i] = l.prevList.Get(i).Value() - } - nextLen := l.nextList.Size().(Int) - for j := Int(0); j < nextLen; j++ { - merged[prevLen+j] = l.nextList.Get(j).Value() - } - l.value = merged - } - return l.value -} - -func newListIterator(listValue traits.Lister) traits.Iterator { - return &listIterator{ - listValue: listValue, - len: listValue.Size().(Int), - } -} - -type listIterator struct { - *baseIterator - listValue traits.Lister - cursor Int - len Int -} - -// HasNext implements the traits.Iterator interface method. -func (it *listIterator) HasNext() ref.Val { - return Bool(it.cursor < it.len) -} - -// Next implements the traits.Iterator interface method. -func (it *listIterator) Next() ref.Val { - if it.HasNext() == True { - index := it.cursor - it.cursor++ - return it.listValue.Get(index) - } - return nil -} diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go deleted file mode 100644 index 4c91f0da32..0000000000 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ /dev/null @@ -1,787 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/pb" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - "github.com/stoewer/go-strcase" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs. -func NewDynamicMap(adapter ref.TypeAdapter, value interface{}) traits.Mapper { - refValue := reflect.ValueOf(value) - return &baseMap{ - TypeAdapter: adapter, - mapAccessor: newReflectMapAccessor(adapter, refValue), - value: value, - size: refValue.Len(), - } -} - -// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been -// encoded in protocol buffer form. -// -// The `adapter` argument provides type adaptation capabilities from proto to CEL. -func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mapper { - fields := value.GetFields() - return &baseMap{ - TypeAdapter: adapter, - mapAccessor: newJSONStructAccessor(adapter, fields), - value: value, - size: len(fields), - } -} - -// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values. -func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Mapper { - return &baseMap{ - TypeAdapter: adapter, - mapAccessor: newRefValMapAccessor(value), - value: value, - size: len(value), - } -} - -// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values. 
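// Usage sketch, not from the vendored source: a plain map[string]interface{} is
// wrapped without copying; lookups go through the string-keyed accessor and values
// are adapted to CEL types on access.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	m := types.NewStringInterfaceMap(types.DefaultTypeAdapter, map[string]interface{}{
		"name":    "tekton",
		"retries": int64(3),
	})
	fmt.Println(m.Get(types.String("retries")))      // 3
	fmt.Println(m.Contains(types.String("missing"))) // false
	fmt.Println(m.Size())                            // 2
}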
-func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]interface{}) traits.Mapper { - return &baseMap{ - TypeAdapter: adapter, - mapAccessor: newStringIfaceMapAccessor(adapter, value), - value: value, - size: len(value), - } -} - -// NewStringStringMap returns a specialized traits.Mapper with string keys and values. -func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits.Mapper { - return &baseMap{ - TypeAdapter: adapter, - mapAccessor: newStringMapAccessor(value), - value: value, - size: len(value), - } -} - -// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values. -func NewProtoMap(adapter ref.TypeAdapter, value *pb.Map) traits.Mapper { - return &protoMap{ - TypeAdapter: adapter, - value: value, - } -} - -var ( - // MapType singleton. - MapType = NewTypeValue("map", - traits.ContainerType, - traits.IndexerType, - traits.IterableType, - traits.SizerType) -) - -// mapAccessor is a private interface for finding values within a map and iterating over the keys. -// This interface implements portions of the API surface area required by the traits.Mapper -// interface. -type mapAccessor interface { - // Find returns a value, if one exists, for the inpput key. - // - // If the key is not found the function returns (nil, false). - // If the input key is not valid for the map, or is Err or Unknown the function returns - // (Unknown|Err, false). - Find(ref.Val) (ref.Val, bool) - - // Iterator returns an Iterator over the map key set. - Iterator() traits.Iterator -} - -// baseMap is a reflection based map implementation designed to handle a variety of map-like types. -// -// Since CEL is side-effect free, the base map represents an immutable object. -type baseMap struct { - // TypeAdapter used to convert keys and values accessed within the map. - ref.TypeAdapter - - // mapAccessor interface implementation used to find and iterate over map keys. - mapAccessor - - // value is the native Go value upon which the map type operators. - value interface{} - - // size is the number of entries in the map. - size int -} - -// Contains implements the traits.Container interface method. -func (m *baseMap) Contains(index ref.Val) ref.Val { - val, found := m.Find(index) - // When the index is not found and val is non-nil, this is an error or unknown value. - if !found && val != nil && IsUnknownOrError(val) { - return val - } - return Bool(found) -} - -// ConvertToNative implements the ref.Val interface method. -func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - // If the map is already assignable to the desired type return it, e.g. interfaces and - // maps with the same key value types. - if reflect.TypeOf(m.value).AssignableTo(typeDesc) { - return m.value, nil - } - if reflect.TypeOf(m).AssignableTo(typeDesc) { - return m, nil - } - switch typeDesc { - case anyValueType: - json, err := m.ConvertToNative(jsonStructType) - if err != nil { - return nil, err - } - return anypb.New(json.(proto.Message)) - case jsonValueType, jsonStructType: - jsonEntries, err := - m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) - if err != nil { - return nil, err - } - jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)} - if typeDesc == jsonStructType { - return jsonMap, nil - } - return structpb.NewStructValue(jsonMap), nil - } - - // Unwrap pointers, but track their use. 
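// Conversion sketch, not from the vendored source, for the struct branch below:
// map keys are upper-camel-cased into exported field names, so "first_name" is
// expected to land in FirstName. The contact type and its fields are hypothetical
// examples, not part of the library.

package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/common/types"
)

type contact struct {
	FirstName string
	Age       int64
}

func main() {
	m := types.NewStringInterfaceMap(types.DefaultTypeAdapter, map[string]interface{}{
		"first_name": "Ada",
		"age":        int64(36),
	})
	out, err := m.ConvertToNative(reflect.TypeOf(&contact{}))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out.(*contact)) // &{FirstName:Ada Age:36}
}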
- isPtr := false - if typeDesc.Kind() == reflect.Ptr { - tk := typeDesc - typeDesc = typeDesc.Elem() - if typeDesc.Kind() == reflect.Ptr { - return nil, fmt.Errorf("unsupported type conversion to '%v'", tk) - } - isPtr = true - } - switch typeDesc.Kind() { - // Map conversion. - case reflect.Map: - otherKey := typeDesc.Key() - otherElem := typeDesc.Elem() - nativeMap := reflect.MakeMapWithSize(typeDesc, m.size) - it := m.Iterator() - for it.HasNext() == True { - key := it.Next() - refKeyValue, err := key.ConvertToNative(otherKey) - if err != nil { - return nil, err - } - refElemValue, err := m.Get(key).ConvertToNative(otherElem) - if err != nil { - return nil, err - } - nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue)) - } - return nativeMap.Interface(), nil - case reflect.Struct: - nativeStructPtr := reflect.New(typeDesc) - nativeStruct := nativeStructPtr.Elem() - it := m.Iterator() - for it.HasNext() == True { - key := it.Next() - // Ensure the field name being referenced is exported. - // Only exported (public) field names can be set by reflection, where the name - // must be at least one character in length and start with an upper-case letter. - fieldName := key.ConvertToType(StringType) - if IsError(fieldName) { - return nil, fieldName.(*Err) - } - name := string(fieldName.(String)) - name = strcase.UpperCamelCase(name) - fieldRef := nativeStruct.FieldByName(name) - if !fieldRef.IsValid() { - return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc) - } - fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type()) - if err != nil { - return nil, err - } - fieldRef.Set(reflect.ValueOf(fieldValue)) - } - if isPtr { - return nativeStructPtr.Interface(), nil - } - return nativeStruct.Interface(), nil - } - return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc) -} - -// ConvertToType implements the ref.Val interface method. -func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case MapType: - return m - case TypeType: - return MapType - } - return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal) -} - -// Equal implements the ref.Val interface method. -func (m *baseMap) Equal(other ref.Val) ref.Val { - otherMap, ok := other.(traits.Mapper) - if !ok { - return MaybeNoSuchOverloadErr(other) - } - if m.Size() != otherMap.Size() { - return False - } - it := m.Iterator() - for it.HasNext() == True { - key := it.Next() - thisVal, _ := m.Find(key) - otherVal, found := otherMap.Find(key) - if !found { - if otherVal == nil { - return False - } - return MaybeNoSuchOverloadErr(otherVal) - } - valEq := thisVal.Equal(otherVal) - if valEq != True { - return valEq - } - } - return True -} - -// Get implements the traits.Indexer interface method. -func (m *baseMap) Get(key ref.Val) ref.Val { - v, found := m.Find(key) - if !found { - return ValOrErr(v, "no such key: %v", key) - } - return v -} - -// Size implements the traits.Sizer interface method. -func (m *baseMap) Size() ref.Val { - return Int(m.size) -} - -// Type implements the ref.Val interface method. -func (m *baseMap) Type() ref.Type { - return MapType -} - -// Value implements the ref.Val interface method. 
-func (m *baseMap) Value() interface{} { - return m.value -} - -func newJSONStructAccessor(adapter ref.TypeAdapter, st map[string]*structpb.Value) mapAccessor { - return &jsonStructAccessor{ - TypeAdapter: adapter, - st: st, - } -} - -type jsonStructAccessor struct { - ref.TypeAdapter - st map[string]*structpb.Value -} - -// Find searches the json struct field map for the input key value and returns (value, true) if -// found. -// -// If the key is not found the function returns (nil, false). -// If the input key is not a String, or is an Err or Unknown, the function returns -// (Unknown|Err, false). -func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) { - strKey, ok := key.(String) - if !ok { - return MaybeNoSuchOverloadErr(key), false - } - keyVal, found := a.st[string(strKey)] - if !found { - return nil, false - } - return a.NativeToValue(keyVal), true -} - -// Iterator creates a new traits.Iterator from the set of JSON struct field names. -func (a *jsonStructAccessor) Iterator() traits.Iterator { - // Copy the keys to make their order stable. - mapKeys := make([]string, len(a.st)) - i := 0 - for k := range a.st { - mapKeys[i] = k - i++ - } - return &stringKeyIterator{ - mapKeys: mapKeys, - len: len(mapKeys), - } -} - -func newReflectMapAccessor(adapter ref.TypeAdapter, value reflect.Value) mapAccessor { - keyType := value.Type().Key() - return &reflectMapAccessor{ - TypeAdapter: adapter, - refValue: value, - keyType: keyType, - } -} - -type reflectMapAccessor struct { - ref.TypeAdapter - refValue reflect.Value - keyType reflect.Type -} - -// Find converts the input key to a native Golang type and then uses reflection to find the key, -// returning (value, true) if present. -// -// If the key is not found the function returns (nil, false). -// If the input key is not a String, or is an Err or Unknown, the function returns -// (Unknown|Err, false). -func (a *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) { - if IsUnknownOrError(key) { - return MaybeNoSuchOverloadErr(key), false - } - k, err := key.ConvertToNative(a.keyType) - if err != nil { - return &Err{err}, false - } - var refKey reflect.Value - switch k := k.(type) { - case reflect.Value: - refKey = k - default: - refKey = reflect.ValueOf(k) - } - val := a.refValue.MapIndex(refKey) - if !val.IsValid() { - return nil, false - } - return a.NativeToValue(val.Interface()), true -} - -// Iterator creates a Golang reflection based traits.Iterator. -func (a *reflectMapAccessor) Iterator() traits.Iterator { - return &mapIterator{ - TypeAdapter: a.TypeAdapter, - mapKeys: a.refValue.MapKeys(), - len: a.refValue.Len(), - } -} - -func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { - return &refValMapAccessor{mapVal: mapVal} -} - -type refValMapAccessor struct { - mapVal map[ref.Val]ref.Val -} - -// Find uses native map accesses to find the key, returning (value, true) if present. -// -// If the key is not found the function returns (nil, false). -// If the input key is an Err or Unknown, the function returns (Unknown|Err, false). -func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) { - if IsUnknownOrError(key) { - return key, false - } - keyVal, found := a.mapVal[key] - return keyVal, found -} - -// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection. 
-func (a *refValMapAccessor) Iterator() traits.Iterator { - return &mapIterator{ - TypeAdapter: DefaultTypeAdapter, - mapKeys: reflect.ValueOf(a.mapVal).MapKeys(), - len: len(a.mapVal), - } -} - -func newStringMapAccessor(strMap map[string]string) mapAccessor { - return &stringMapAccessor{mapVal: strMap} -} - -type stringMapAccessor struct { - mapVal map[string]string -} - -// Find uses native map accesses to find the key, returning (value, true) if present. -// -// If the key is not found the function returns (nil, false). -// If the input key is not a String, or is an Err or Unknown, the function returns -// (Unknown|Err, false). -func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) { - strKey, ok := key.(String) - if !ok { - return MaybeNoSuchOverloadErr(key), false - } - keyVal, found := a.mapVal[string(strKey)] - if !found { - return nil, false - } - return String(keyVal), true -} - -// Iterator creates a new traits.Iterator from the string key set of the map. -func (a *stringMapAccessor) Iterator() traits.Iterator { - // Copy the keys to make their order stable. - mapKeys := make([]string, len(a.mapVal)) - i := 0 - for k := range a.mapVal { - mapKeys[i] = k - i++ - } - return &stringKeyIterator{ - mapKeys: mapKeys, - len: len(mapKeys), - } -} - -func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interface{}) mapAccessor { - return &stringIfaceMapAccessor{ - TypeAdapter: adapter, - mapVal: mapVal, - } -} - -type stringIfaceMapAccessor struct { - ref.TypeAdapter - mapVal map[string]interface{} -} - -// Find uses native map accesses to find the key, returning (value, true) if present. -// -// If the key is not found the function returns (nil, false). -// If the input key is not a String, or is an Err or Unknown, the function returns -// (Unknown|Err, false). -func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) { - strKey, ok := key.(String) - if !ok { - return MaybeNoSuchOverloadErr(key), false - } - keyVal, found := a.mapVal[string(strKey)] - if !found { - return nil, false - } - return a.NativeToValue(keyVal), true -} - -// Iterator creates a new traits.Iterator from the string key set of the map. -func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { - // Copy the keys to make their order stable. - mapKeys := make([]string, len(a.mapVal)) - i := 0 - for k := range a.mapVal { - mapKeys[i] = k - i++ - } - return &stringKeyIterator{ - mapKeys: mapKeys, - len: len(mapKeys), - } -} - -// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to -// accessing protoreflect.Map values. -type protoMap struct { - ref.TypeAdapter - value *pb.Map -} - -// Contains returns whether the map contains the given key. -func (m *protoMap) Contains(key ref.Val) ref.Val { - val, found := m.Find(key) - // When the index is not found and val is non-nil, this is an error or unknown value. - if !found && val != nil && IsUnknownOrError(val) { - return val - } - return Bool(found) -} - -// ConvertToNative implements the ref.Val interface method. -// -// Note, assignment to Golang struct types is not yet supported. -func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - // If the map is already assignable to the desired type return it, e.g. interfaces and - // maps with the same key value types. 
- switch typeDesc { - case anyValueType: - json, err := m.ConvertToNative(jsonStructType) - if err != nil { - return nil, err - } - return anypb.New(json.(proto.Message)) - case jsonValueType, jsonStructType: - jsonEntries, err := - m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) - if err != nil { - return nil, err - } - jsonMap := &structpb.Struct{ - Fields: jsonEntries.(map[string]*structpb.Value)} - if typeDesc == jsonStructType { - return jsonMap, nil - } - return structpb.NewStructValue(jsonMap), nil - } - switch typeDesc.Kind() { - case reflect.Struct, reflect.Ptr: - if reflect.TypeOf(m.value).AssignableTo(typeDesc) { - return m.value, nil - } - if reflect.TypeOf(m).AssignableTo(typeDesc) { - return m, nil - } - } - if typeDesc.Kind() != reflect.Map { - return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc) - } - - keyType := m.value.KeyType.ReflectType() - valType := m.value.ValueType.ReflectType() - otherKeyType := typeDesc.Key() - otherValType := typeDesc.Elem() - mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len()) - var err error - m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool { - ntvKey := key.Interface() - ntvVal := val.Interface() - switch ntvVal.(type) { - case protoreflect.Message: - ntvVal = ntvVal.(protoreflect.Message).Interface() - } - if keyType == otherKeyType && valType == otherValType { - mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal)) - return true - } - celKey := m.NativeToValue(ntvKey) - celVal := m.NativeToValue(ntvVal) - ntvKey, err = celKey.ConvertToNative(otherKeyType) - if err != nil { - // early terminate the range loop. - return false - } - ntvVal, err = celVal.ConvertToNative(otherValType) - if err != nil { - // early terminate the range loop. - return false - } - mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal)) - return true - }) - if err != nil { - return nil, err - } - return mapVal.Interface(), nil -} - -// ConvertToType implements the ref.Val interface method. -func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case MapType: - return m - case TypeType: - return MapType - } - return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal) -} - -// Equal implements the ref.Val interface method. -func (m *protoMap) Equal(other ref.Val) ref.Val { - if MapType != other.Type() { - return MaybeNoSuchOverloadErr(other) - } - otherMap := other.(traits.Mapper) - if m.value.Map.Len() != int(otherMap.Size().(Int)) { - return False - } - var retVal ref.Val = True - m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool { - keyVal := m.NativeToValue(key.Interface()) - valVal := m.NativeToValue(val) - otherVal, found := otherMap.Find(keyVal) - if !found { - if otherVal == nil { - retVal = False - return false - } - retVal = MaybeNoSuchOverloadErr(otherVal) - return false - } - valEq := valVal.Equal(otherVal) - if valEq != True { - retVal = valEq - return false - } - return true - }) - return retVal -} - -// Find returns whether the protoreflect.Map contains the input key. -// -// If the key is not found the function returns (nil, false). -// If the input key is not a supported proto map key type, or is an Err or Unknown, -// the function returns -// (Unknown|Err, false). -func (m *protoMap) Find(key ref.Val) (ref.Val, bool) { - if IsUnknownOrError(key) { - return key, false - } - // Convert the input key to the expected protobuf key type. 
- ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType()) - if err != nil { - return &Err{err}, false - } - // Use protoreflection to get the key value. - val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey()) - if !val.IsValid() { - return nil, false - } - // Perform nominal type unwrapping from the input value. - switch v := val.Interface().(type) { - case protoreflect.List, protoreflect.Map: - // Maps do not support list or map values - return NewErr("unsupported map element type: (%T)%v", v, v), false - default: - return m.NativeToValue(v), true - } -} - -// Get implements the traits.Indexer interface method. -func (m *protoMap) Get(key ref.Val) ref.Val { - v, found := m.Find(key) - if !found { - return ValOrErr(v, "no such key: %v", key) - } - return v -} - -// Iterator implements the traits.Iterable interface method. -func (m *protoMap) Iterator() traits.Iterator { - // Copy the keys to make their order stable. - mapKeys := make([]protoreflect.MapKey, 0, m.value.Len()) - m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - mapKeys = append(mapKeys, k) - return true - }) - return &protoMapIterator{ - TypeAdapter: m.TypeAdapter, - mapKeys: mapKeys, - len: m.value.Len(), - } -} - -// Size returns the number of entries in the protoreflect.Map. -func (m *protoMap) Size() ref.Val { - return Int(m.value.Len()) -} - -// Type implements the ref.Val interface method. -func (m *protoMap) Type() ref.Type { - return MapType -} - -// Value implements the ref.Val interface method. -func (m *protoMap) Value() interface{} { - return m.value -} - -type mapIterator struct { - *baseIterator - ref.TypeAdapter - mapKeys []reflect.Value - cursor int - len int -} - -// HasNext implements the traits.Iterator interface method. -func (it *mapIterator) HasNext() ref.Val { - return Bool(it.cursor < it.len) -} - -// Next implements the traits.Iterator interface method. -func (it *mapIterator) Next() ref.Val { - if it.HasNext() == True { - index := it.cursor - it.cursor++ - refKey := it.mapKeys[index] - return it.NativeToValue(refKey.Interface()) - } - return nil -} - -type protoMapIterator struct { - *baseIterator - ref.TypeAdapter - mapKeys []protoreflect.MapKey - cursor int - len int -} - -// HasNext implements the traits.Iterator interface method. -func (it *protoMapIterator) HasNext() ref.Val { - return Bool(it.cursor < it.len) -} - -// Next implements the traits.Iterator interface method. -func (it *protoMapIterator) Next() ref.Val { - if it.HasNext() == True { - index := it.cursor - it.cursor++ - refKey := it.mapKeys[index] - return it.NativeToValue(refKey.Interface()) - } - return nil -} - -type stringKeyIterator struct { - *baseIterator - mapKeys []string - cursor int - len int -} - -// HasNext implements the traits.Iterator interface method. -func (it *stringKeyIterator) HasNext() ref.Val { - return Bool(it.cursor < it.len) -} - -// Next implements the traits.Iterator interface method. 
-func (it *stringKeyIterator) Next() ref.Val { - if it.HasNext() == True { - index := it.cursor - it.cursor++ - return String(it.mapKeys[index]) - } - return nil -} diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go deleted file mode 100644 index 3dc688856d..0000000000 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "google.golang.org/protobuf/proto" - - "github.com/google/cel-go/common/types/ref" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -// Null type implementation. -type Null structpb.NullValue - -var ( - // NullType singleton. - NullType = NewTypeValue("null_type") - // NullValue singleton. - NullValue = Null(structpb.NullValue_NULL_VALUE) - - jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE) -) - -// ConvertToNative implements ref.Val.ConvertToNative. -func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.Int32: - return reflect.ValueOf(n).Convert(typeDesc).Interface(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Convert to a JSON-null before packing to an Any field since the enum value for JSON - // null cannot be packed directly. - pb, err := n.ConvertToNative(jsonValueType) - if err != nil { - return nil, err - } - return anypb.New(pb.(proto.Message)) - case jsonValueType: - return structpb.NewNullValue(), nil - } - case reflect.Interface: - nv := n.Value() - if reflect.TypeOf(nv).Implements(typeDesc) { - return nv, nil - } - if reflect.TypeOf(n).Implements(typeDesc) { - return n, nil - } - } - // If the type conversion isn't supported return an error. - return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (n Null) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case StringType: - return String("null") - case NullType: - return n - case TypeType: - return NullType - } - return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal) -} - -// Equal implements ref.Val.Equal. -func (n Null) Equal(other ref.Val) ref.Val { - if NullType != other.Type() { - return ValOrErr(other, "no such overload") - } - return True -} - -// Type implements ref.Val.Type. -func (n Null) Type() ref.Type { - return NullType -} - -// Value implements ref.Val.Value. 
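// Behavior sketch, not from the vendored source: the NullValue singleton converts
// to the JSON null wrapper and to the string "null", and only compares equal to
// another null.

package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/common/types"
	structpb "google.golang.org/protobuf/types/known/structpb"
)

func main() {
	n := types.NullValue
	fmt.Println(n.ConvertToType(types.StringType)) // null
	jv, _ := n.ConvertToNative(reflect.TypeOf(&structpb.Value{}))
	fmt.Println(jv.(*structpb.Value).GetNullValue()) // NULL_VALUE
	fmt.Println(n.Equal(types.NullValue))            // true
}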
-func (n Null) Value() interface{} { - return structpb.NullValue_NULL_VALUE -} diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go deleted file mode 100644 index e77b078fa6..0000000000 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/pb" - "github.com/google/cel-go/common/types/ref" - - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -type protoObj struct { - ref.TypeAdapter - value proto.Message - typeDesc *pb.TypeDescription - typeValue *TypeValue -} - -// NewObject returns an object based on a proto.Message value which handles -// conversion between protobuf type values and expression type values. -// Objects support indexing and iteration. -// -// Note: the type value is pulled from the list of registered types within the -// type provider. If the proto type is not registered within the type provider, -// then this will result in an error within the type adapter / provider. -func NewObject(adapter ref.TypeAdapter, - typeDesc *pb.TypeDescription, - typeValue *TypeValue, - value proto.Message) ref.Val { - return &protoObj{ - TypeAdapter: adapter, - value: value, - typeDesc: typeDesc, - typeValue: typeValue} -} - -func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - pb := o.value - if reflect.TypeOf(pb).AssignableTo(typeDesc) { - return pb, nil - } - if reflect.TypeOf(o).AssignableTo(typeDesc) { - return o, nil - } - switch typeDesc { - case anyValueType: - _, isAny := pb.(*anypb.Any) - if isAny { - return pb, nil - } - return anypb.New(pb) - case jsonValueType: - // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no - // support for direct conversion from proto.Message to protobuf.Value. 
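// Round-trip sketch, not from the vendored source, for the step below: protojson is
// the supported bridge from an arbitrary proto.Message to a structpb.Value. The
// Duration message is just an example input.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	durationpb "google.golang.org/protobuf/types/known/durationpb"
	structpb "google.golang.org/protobuf/types/known/structpb"
)

func main() {
	msg := durationpb.New(90 * time.Second)
	b, err := protojson.Marshal(msg)
	if err != nil {
		panic(err)
	}
	val := &structpb.Value{}
	if err := protojson.Unmarshal(b, val); err != nil {
		panic(err)
	}
	fmt.Println(val.GetStringValue()) // "90s"
}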
- bytes, err := protojson.Marshal(pb) - if err != nil { - return nil, err - } - json := &structpb.Value{} - err = protojson.Unmarshal(bytes, json) - if err != nil { - return nil, err - } - return json, nil - default: - if typeDesc == o.typeDesc.ReflectType() { - return o.value, nil - } - if typeDesc.Kind() == reflect.Ptr { - val := reflect.New(typeDesc.Elem()).Interface() - dstPB, ok := val.(proto.Message) - if ok { - proto.Merge(dstPB, pb) - return dstPB, nil - } - } - } - return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc) -} - -func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - default: - if o.Type().TypeName() == typeVal.TypeName() { - return o - } - case TypeType: - return o.typeValue - } - return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal) -} - -func (o *protoObj) Equal(other ref.Val) ref.Val { - if o.typeDesc.Name() != other.Type().TypeName() { - return MaybeNoSuchOverloadErr(other) - } - return Bool(proto.Equal(o.value, other.Value().(proto.Message))) -} - -// IsSet tests whether a field which is defined is set to a non-default value. -func (o *protoObj) IsSet(field ref.Val) ref.Val { - protoFieldName, ok := field.(String) - if !ok { - return MaybeNoSuchOverloadErr(field) - } - protoFieldStr := string(protoFieldName) - fd, found := o.typeDesc.FieldByName(protoFieldStr) - if !found { - return NewErr("no such field '%s'", field) - } - if fd.IsSet(o.value) { - return True - } - return False -} - -func (o *protoObj) Get(index ref.Val) ref.Val { - protoFieldName, ok := index.(String) - if !ok { - return MaybeNoSuchOverloadErr(index) - } - protoFieldStr := string(protoFieldName) - fd, found := o.typeDesc.FieldByName(protoFieldStr) - if !found { - return NewErr("no such field '%s'", index) - } - fv, err := fd.GetFrom(o.value) - if err != nil { - return NewErr(err.Error()) - } - return o.NativeToValue(fv) -} - -func (o *protoObj) Type() ref.Type { - return o.typeValue -} - -func (o *protoObj) Value() interface{} { - return o.value -} diff --git a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel deleted file mode 100644 index 50d8f4d286..0000000000 --- a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel +++ /dev/null @@ -1,50 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "checked.go", - "enum.go", - "file.go", - "pb.go", - "type.go", - ], - importpath = "github.com/google/cel-go/common/types/pb", - deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", - "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", - "@org_golang_google_protobuf//types/dynamicpb:go_default_library", - "@org_golang_google_protobuf//types/known/anypb:go_default_library", - "@org_golang_google_protobuf//types/known/durationpb:go_default_library", - "@org_golang_google_protobuf//types/known/emptypb:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", - "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", - ], -) - -go_test( - name = 
"go_default_test", - srcs = [ - "file_test.go", - "pb_test.go", - "type_test.go", - ], - size = "small", - embed = [":go_default_library"], - deps = [ - "//checker/decls:go_default_library", - "//test/proto2pb:test_all_types_go_proto", - "//test/proto3pb:test_all_types_go_proto", - "@org_golang_google_protobuf//reflect/protodesc:go_default_library", - "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", - "@org_golang_google_protobuf//types/descriptorpb:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/types/pb/checked.go b/vendor/github.com/google/cel-go/common/types/pb/checked.go deleted file mode 100644 index 312a6a072f..0000000000 --- a/vendor/github.com/google/cel-go/common/types/pb/checked.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pb - -import ( - "google.golang.org/protobuf/reflect/protoreflect" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - emptypb "google.golang.org/protobuf/types/known/emptypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -var ( - // CheckedPrimitives map from proto field descriptor type to expr.Type. - CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{ - protoreflect.BoolKind: checkedBool, - protoreflect.BytesKind: checkedBytes, - protoreflect.DoubleKind: checkedDouble, - protoreflect.FloatKind: checkedDouble, - protoreflect.Int32Kind: checkedInt, - protoreflect.Int64Kind: checkedInt, - protoreflect.Sint32Kind: checkedInt, - protoreflect.Sint64Kind: checkedInt, - protoreflect.Uint32Kind: checkedUint, - protoreflect.Uint64Kind: checkedUint, - protoreflect.Fixed32Kind: checkedUint, - protoreflect.Fixed64Kind: checkedUint, - protoreflect.Sfixed32Kind: checkedInt, - protoreflect.Sfixed64Kind: checkedInt, - protoreflect.StringKind: checkedString} - - // CheckedWellKnowns map from qualified proto type name to expr.Type for - // well-known proto types. - CheckedWellKnowns = map[string]*exprpb.Type{ - // Wrapper types. - "google.protobuf.BoolValue": checkedWrap(checkedBool), - "google.protobuf.BytesValue": checkedWrap(checkedBytes), - "google.protobuf.DoubleValue": checkedWrap(checkedDouble), - "google.protobuf.FloatValue": checkedWrap(checkedDouble), - "google.protobuf.Int64Value": checkedWrap(checkedInt), - "google.protobuf.Int32Value": checkedWrap(checkedInt), - "google.protobuf.UInt64Value": checkedWrap(checkedUint), - "google.protobuf.UInt32Value": checkedWrap(checkedUint), - "google.protobuf.StringValue": checkedWrap(checkedString), - // Well-known types. - "google.protobuf.Any": checkedAny, - "google.protobuf.Duration": checkedDuration, - "google.protobuf.Timestamp": checkedTimestamp, - // Json types. 
- "google.protobuf.ListValue": checkedListDyn, - "google.protobuf.NullValue": checkedNull, - "google.protobuf.Struct": checkedMapStringDyn, - "google.protobuf.Value": checkedDyn, - } - - // common types - checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}} - // Wrapper and primitive types. - checkedBool = checkedPrimitive(exprpb.Type_BOOL) - checkedBytes = checkedPrimitive(exprpb.Type_BYTES) - checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE) - checkedInt = checkedPrimitive(exprpb.Type_INT64) - checkedString = checkedPrimitive(exprpb.Type_STRING) - checkedUint = checkedPrimitive(exprpb.Type_UINT64) - // Well-known type equivalents. - checkedAny = checkedWellKnown(exprpb.Type_ANY) - checkedDuration = checkedWellKnown(exprpb.Type_DURATION) - checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP) - // Json-based type equivalents. - checkedNull = &exprpb.Type{ - TypeKind: &exprpb.Type_Null{ - Null: structpb.NullValue_NULL_VALUE}} - checkedListDyn = &exprpb.Type{ - TypeKind: &exprpb.Type_ListType_{ - ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}} - checkedMapStringDyn = &exprpb.Type{ - TypeKind: &exprpb.Type_MapType_{ - MapType: &exprpb.Type_MapType{ - KeyType: checkedString, - ValueType: checkedDyn}}} -) diff --git a/vendor/github.com/google/cel-go/common/types/pb/enum.go b/vendor/github.com/google/cel-go/common/types/pb/enum.go deleted file mode 100644 index 4a26b5c7c3..0000000000 --- a/vendor/github.com/google/cel-go/common/types/pb/enum.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pb - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// NewEnumValueDescription produces an enum value description with the fully qualified enum value -// name and the enum value descriptor. -func NewEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription { - return &EnumValueDescription{ - enumValueName: name, - desc: desc, - } -} - -// EnumValueDescription maps a fully-qualified enum value name to its numeric value. -type EnumValueDescription struct { - enumValueName string - desc protoreflect.EnumValueDescriptor -} - -// Name returns the fully-qualified identifier name for the enum value. -func (ed *EnumValueDescription) Name() string { - return ed.enumValueName -} - -// Value returns the (numeric) value of the enum. -func (ed *EnumValueDescription) Value() int32 { - return int32(ed.desc.Number()) -} diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go deleted file mode 100644 index 0bcade75f9..0000000000 --- a/vendor/github.com/google/cel-go/common/types/pb/file.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pb - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// NewFileDescription returns a FileDescription instance with a complete listing of all the message -// types and enum values declared within any scope in the file. -func NewFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) *FileDescription { - metadata := collectFileMetadata(fileDesc) - enums := make(map[string]*EnumValueDescription) - for name, enumVal := range metadata.enumValues { - enums[name] = NewEnumValueDescription(name, enumVal) - } - types := make(map[string]*TypeDescription) - for name, msgType := range metadata.msgTypes { - types[name] = NewTypeDescription(name, msgType) - } - return &FileDescription{ - types: types, - enums: enums, - } -} - -// FileDescription holds a map of all types and enum values declared within a proto file. -type FileDescription struct { - types map[string]*TypeDescription - enums map[string]*EnumValueDescription -} - -// GetEnumDescription returns an EnumDescription for a qualified enum value -// name declared within the .proto file. -func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) { - ed, found := fd.enums[sanitizeProtoName(enumName)] - return ed, found -} - -// GetEnumNames returns the string names of all enum values in the file. -func (fd *FileDescription) GetEnumNames() []string { - enumNames := make([]string, len(fd.enums)) - i := 0 - for _, e := range fd.enums { - enumNames[i] = e.Name() - i++ - } - return enumNames -} - -// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name -// declared within the .proto file. -func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) { - td, found := fd.types[sanitizeProtoName(typeName)] - return td, found -} - -// GetTypeNames returns the list of all type names contained within the file. -func (fd *FileDescription) GetTypeNames() []string { - typeNames := make([]string, len(fd.types)) - i := 0 - for _, t := range fd.types { - typeNames[i] = t.Name() - i++ - } - return typeNames -} - -// sanitizeProtoName strips the leading '.' from the proto message name. -func sanitizeProtoName(name string) string { - if name != "" && name[0] == '.' { - return name[1:] - } - return name -} - -// fileMetadata is a flattened view of message types and enum values within a file descriptor. -type fileMetadata struct { - // msgTypes maps from fully-qualified message name to descriptor. - msgTypes map[string]protoreflect.MessageDescriptor - // enumValues maps from fully-qualified enum value to enum value descriptor. - enumValues map[string]protoreflect.EnumValueDescriptor - // TODO: support enum type definitions for use in future type-check enhancements. -} - -// collectFileMetadata traverses the proto file object graph to collect message types and enum -// values and index them by their fully qualified names. 
-func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata { - msgTypes := make(map[string]protoreflect.MessageDescriptor) - enumValues := make(map[string]protoreflect.EnumValueDescriptor) - collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues) - collectEnumValues(fileDesc.Enums(), enumValues) - return &fileMetadata{ - msgTypes: msgTypes, - enumValues: enumValues, - } -} - -// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of -// fully qualified protobuf names to descriptors. -func collectMsgTypes(msgTypes protoreflect.MessageDescriptors, msgTypeMap map[string]protoreflect.MessageDescriptor, enumValueMap map[string]protoreflect.EnumValueDescriptor) { - for i := 0; i < msgTypes.Len(); i++ { - msgType := msgTypes.Get(i) - msgTypeMap[string(msgType.FullName())] = msgType - nestedMsgTypes := msgType.Messages() - if nestedMsgTypes.Len() != 0 { - collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap) - } - nestedEnumTypes := msgType.Enums() - if nestedEnumTypes.Len() != 0 { - collectEnumValues(nestedEnumTypes, enumValueMap) - } - } -} - -// collectEnumValues accumulates the enum values within an enum declaration. -func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) { - for i := 0; i < enumTypes.Len(); i++ { - enumType := enumTypes.Get(i) - enumTypeValues := enumType.Values() - for j := 0; j < enumTypeValues.Len(); j++ { - enumValue := enumTypeValues.Get(j) - enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name())) - enumValueMap[enumValueName] = enumValue - } - } -} diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go deleted file mode 100644 index d02fbe0c25..0000000000 --- a/vendor/github.com/google/cel-go/common/types/pb/pb.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pb reflects over protocol buffer descriptors to generate objects -// that simplify type, enum, and field lookup. -package pb - -import ( - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "google.golang.org/protobuf/types/known/anypb" - durpb "google.golang.org/protobuf/types/known/durationpb" - emptypb "google.golang.org/protobuf/types/known/emptypb" - structpb "google.golang.org/protobuf/types/known/structpb" - tspb "google.golang.org/protobuf/types/known/timestamppb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// Db maps from file / message / enum name to file description. -// -// Each Db is isolated from each other, and while information about protobuf descriptors may be -// fetched from the global protobuf registry, no descriptors are added to this registry, else -// the isolation guarantees of the Db object would be violated. 
-type Db struct { - revFileDescriptorMap map[string]*FileDescription - // files contains the deduped set of FileDescriptions whose types are contained in the pb.Db. - files []*FileDescription -} - -var ( - // DefaultDb used at evaluation time or unless overridden at check time. - DefaultDb = &Db{ - revFileDescriptorMap: make(map[string]*FileDescription), - files: []*FileDescription{}, - } -) - -// NewDb creates a new `pb.Db` with an empty type name to file description map. -func NewDb() *Db { - pbdb := &Db{ - revFileDescriptorMap: make(map[string]*FileDescription), - files: []*FileDescription{}, - } - // The FileDescription objects in the default db contain lazily initialized TypeDescription - // values which may point to the state contained in the DefaultDb irrespective of this shallow - // copy; however, the type graph for a field is idempotently computed, and is guaranteed to - // only be initialized once thanks to atomic values within the TypeDescription objects, so it - // is safe to share these values across instances. - for k, v := range DefaultDb.revFileDescriptorMap { - pbdb.revFileDescriptorMap[k] = v - } - pbdb.files = append(pbdb.files, DefaultDb.files...) - return pbdb -} - -// Copy creates a copy of the current database with its own internal descriptor mapping. -func (pbdb *Db) Copy() *Db { - copy := NewDb() - for k, v := range pbdb.revFileDescriptorMap { - copy.revFileDescriptorMap[k] = v - } - for _, f := range pbdb.files { - hasFile := false - for _, f2 := range copy.files { - if f2 == f { - hasFile = true - } - } - if !hasFile { - copy.files = append(copy.files, f) - } - } - return copy -} - -// FileDescriptions returns the set of file descriptions associated with this db. -func (pbdb *Db) FileDescriptions() []*FileDescription { - return pbdb.files -} - -// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the -// message and enum types into the `pb.Db`. -func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) { - fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()] - if found { - return fd, nil - } - // Make sure to search the global registry to see if a protoreflect.FileDescriptor for - // the file specified has been linked into the binary. If so, use the copy of the descriptor - // from the global cache. - // - // Note: Proto reflection relies on descriptor values being object equal rather than object - // equivalence. This choice means that a FieldDescriptor generated from a FileDescriptorProto - // will be incompatible with the FieldDescriptor in the global registry and any message created - // from that global registry. - globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path()) - if err == nil { - fileDesc = globalFD - } - fd = NewFileDescription(fileDesc, pbdb) - for _, enumValName := range fd.GetEnumNames() { - pbdb.revFileDescriptorMap[enumValName] = fd - } - for _, msgTypeName := range fd.GetTypeNames() { - pbdb.revFileDescriptorMap[msgTypeName] = fd - } - pbdb.revFileDescriptorMap[fileDesc.Path()] = fd - - // Return the specific file descriptor registered. - pbdb.files = append(pbdb.files, fd) - return fd, nil -} - -// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all -// other definitions within the message file into the `pb.Db`. 
-func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) { - msgDesc := message.ProtoReflect().Descriptor() - msgName := msgDesc.FullName() - typeName := sanitizeProtoName(string(msgName)) - if fd, found := pbdb.revFileDescriptorMap[typeName]; found { - return fd, nil - } - return pbdb.RegisterDescriptor(msgDesc.ParentFile()) -} - -// DescribeEnum takes a qualified enum name and returns an `EnumDescription` if it exists in the -// `pb.Db`. -func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) { - enumName = sanitizeProtoName(enumName) - if fd, found := pbdb.revFileDescriptorMap[enumName]; found { - return fd.GetEnumDescription(enumName) - } - return nil, false -} - -// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`. -func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) { - typeName = sanitizeProtoName(typeName) - if fd, found := pbdb.revFileDescriptorMap[typeName]; found { - return fd.GetTypeDescription(typeName) - } - return nil, false -} - -// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input -// message is declared. -func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor { - fdMap := map[string]protoreflect.FileDescriptor{} - parentFile := message.ProtoReflect().Descriptor().ParentFile() - fdMap[parentFile.Path()] = parentFile - // Initialize list of dependencies - deps := make([]protoreflect.FileImport, parentFile.Imports().Len()) - for i := 0; i < parentFile.Imports().Len(); i++ { - deps[i] = parentFile.Imports().Get(i) - } - // Expand list for new dependencies - for i := 0; i < len(deps); i++ { - dep := deps[i] - if _, found := fdMap[dep.Path()]; found { - continue - } - fdMap[dep.Path()] = dep.FileDescriptor - for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ { - deps = append(deps, dep.FileDescriptor.Imports().Get(j)) - } - } - return fdMap -} - -func init() { - // Describe well-known types to ensure they can always be resolved by the check and interpret - // execution phases. - // - // The following subset of message types is enough to ensure that all well-known types can - // resolved in the runtime, since describing the value results in describing the whole file - // where the message is declared. - DefaultDb.RegisterMessage(&anypb.Any{}) - DefaultDb.RegisterMessage(&durpb.Duration{}) - DefaultDb.RegisterMessage(&emptypb.Empty{}) - DefaultDb.RegisterMessage(&tspb.Timestamp{}) - DefaultDb.RegisterMessage(&structpb.Value{}) - DefaultDb.RegisterMessage(&wrapperspb.BoolValue{}) -} diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go deleted file mode 100644 index c5060a4684..0000000000 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
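The Db removed above is usually driven in two steps: register a message, which indexes every type and enum in its parent file, then resolve names. A rough sketch, assuming the cel-go import path used throughout this vendor tree and a well-known message chosen only for illustration:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
	durpb "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// NewDb starts from DefaultDb, so the well-known types registered in
	// init() above are already resolvable.
	pbdb := pb.NewDb()
	if _, err := pbdb.RegisterMessage(&durpb.Duration{}); err != nil {
		panic(err)
	}
	td, found := pbdb.DescribeType("google.protobuf.Duration")
	fmt.Println(found, td.Name()) // true google.protobuf.Duration
}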
- -package pb - -import ( - "fmt" - "reflect" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - dynamicpb "google.golang.org/protobuf/types/dynamicpb" - anypb "google.golang.org/protobuf/types/known/anypb" - dpb "google.golang.org/protobuf/types/known/durationpb" - structpb "google.golang.org/protobuf/types/known/structpb" - tpb "google.golang.org/protobuf/types/known/timestamppb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -// description is a private interface used to make it convenient to perform type unwrapping at -// the TypeDescription or FieldDescription level. -type description interface { - // Zero returns an empty immutable protobuf message when the description is a protobuf message - // type. - Zero() proto.Message -} - -// NewTypeDescription produces a TypeDescription value for the fully-qualified proto type name -// with a given descriptor. -func NewTypeDescription(typeName string, desc protoreflect.MessageDescriptor) *TypeDescription { - msgType := dynamicpb.NewMessageType(desc) - msgZero := dynamicpb.NewMessage(desc) - fieldMap := map[string]*FieldDescription{} - fields := desc.Fields() - for i := 0; i < fields.Len(); i++ { - f := fields.Get(i) - fieldMap[string(f.Name())] = NewFieldDescription(f) - } - return &TypeDescription{ - typeName: typeName, - desc: desc, - msgType: msgType, - fieldMap: fieldMap, - reflectType: reflectTypeOf(msgZero), - zeroMsg: zeroValueOf(msgZero), - } -} - -// TypeDescription is a collection of type metadata relevant to expression -// checking and evaluation. -type TypeDescription struct { - typeName string - desc protoreflect.MessageDescriptor - msgType protoreflect.MessageType - fieldMap map[string]*FieldDescription - reflectType reflect.Type - zeroMsg proto.Message -} - -// FieldMap returns a string field name to FieldDescription map. -func (td *TypeDescription) FieldMap() map[string]*FieldDescription { - return td.fieldMap -} - -// FieldByName returns (FieldDescription, true) if the field name is declared within the type. -func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) { - fd, found := td.fieldMap[name] - if !found { - return nil, false - } - return fd, true -} - -// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible. -// -// This method returns the unwrapped value and 'true', else the original value and 'false'. -func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (interface{}, bool) { - return unwrap(td, msg) -} - -// Name returns the fully-qualified name of the type. -func (td *TypeDescription) Name() string { - return string(td.desc.FullName()) -} - -// New returns a mutable proto message -func (td *TypeDescription) New() protoreflect.Message { - return td.msgType.New() -} - -// ReflectType returns the Golang reflect.Type for this type. -func (td *TypeDescription) ReflectType() reflect.Type { - return td.reflectType -} - -// Zero returns the zero proto.Message value for this type. -func (td *TypeDescription) Zero() proto.Message { - return td.zeroMsg -} - -// NewFieldDescription creates a new field description from a protoreflect.FieldDescriptor. 
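A TypeDescription like the one above is mostly consumed through FieldByName and New. A small illustrative sketch, again leaning on a well-known message rather than a project-specific one:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
	tspb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	desc := (&tspb.Timestamp{}).ProtoReflect().Descriptor()
	td := pb.NewTypeDescription(string(desc.FullName()), desc)

	// Field lookup is by proto field name.
	if fd, found := td.FieldByName("seconds"); found {
		fmt.Println(fd.ReflectType()) // int64
	}
	// New returns a mutable dynamicpb message for this descriptor.
	fmt.Println(td.New().Descriptor().FullName()) // google.protobuf.Timestamp
}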
-func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription { - var reflectType reflect.Type - var zeroMsg proto.Message - switch fieldDesc.Kind() { - case protoreflect.EnumKind: - reflectType = reflectTypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind: - zeroMsg = dynamicpb.NewMessage(fieldDesc.Message()) - reflectType = reflectTypeOf(zeroMsg) - default: - reflectType = reflectTypeOf(fieldDesc.Default().Interface()) - if fieldDesc.IsList() { - parentMsg := dynamicpb.NewMessage(fieldDesc.ContainingMessage()) - listField := parentMsg.NewField(fieldDesc).List() - elem := listField.NewElement().Interface() - switch elemType := elem.(type) { - case protoreflect.Message: - elem = elemType.Interface() - } - reflectType = reflectTypeOf(elem) - } - } - // Ensure the list type is appropriately reflected as a Go-native list. - if fieldDesc.IsList() { - reflectType = reflect.SliceOf(reflectType) - } - var keyType, valType *FieldDescription - if fieldDesc.IsMap() { - keyType = NewFieldDescription(fieldDesc.MapKey()) - valType = NewFieldDescription(fieldDesc.MapValue()) - } - return &FieldDescription{ - desc: fieldDesc, - KeyType: keyType, - ValueType: valType, - reflectType: reflectType, - zeroMsg: zeroValueOf(zeroMsg), - } -} - -// FieldDescription holds metadata related to fields declared within a type. -type FieldDescription struct { - // KeyType holds the key FieldDescription for map fields. - KeyType *FieldDescription - // ValueType holds the value FieldDescription for map fields. - ValueType *FieldDescription - - desc protoreflect.FieldDescriptor - reflectType reflect.Type - zeroMsg proto.Message -} - -// CheckedType returns the type-definition used at type-check time. -func (fd *FieldDescription) CheckedType() *exprpb.Type { - if fd.desc.IsMap() { - return &exprpb.Type{ - TypeKind: &exprpb.Type_MapType_{ - MapType: &exprpb.Type_MapType{ - KeyType: fd.KeyType.typeDefToType(), - ValueType: fd.ValueType.typeDefToType(), - }, - }, - } - } - if fd.desc.IsList() { - return &exprpb.Type{ - TypeKind: &exprpb.Type_ListType_{ - ListType: &exprpb.Type_ListType{ - ElemType: fd.typeDefToType()}}} - } - return fd.typeDefToType() -} - -// Descriptor returns the protoreflect.FieldDescriptor for this type. -func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor { - return fd.desc -} - -// IsSet returns whether the field is set on the target value, per the proto presence conventions -// of proto2 or proto3 accordingly. -// -// This function implements the FieldType.IsSet function contract which can be used to operate on -// more than just protobuf field accesses; however, the target here must be a protobuf.Message. -func (fd *FieldDescription) IsSet(target interface{}) bool { - switch v := target.(type) { - case proto.Message: - pbRef := v.ProtoReflect() - pbDesc := pbRef.Descriptor() - if pbDesc == fd.desc.ContainingMessage() { - // When the target protobuf shares the same message descriptor instance as the field - // descriptor, use the cached field descriptor value. - return pbRef.Has(fd.desc) - } - // Otherwise, fallback to a dynamic lookup of the field descriptor from the target - // instance as an attempt to use the cached field descriptor will result in a panic. - return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))) - default: - return false - } -} - -// GetFrom returns the accessor method associated with the field on the proto generated struct. -// -// If the field is not set, the proto default value is returned instead. 
-// -// This function implements the FieldType.GetFrom function contract which can be used to operate -// on more than just protobuf field accesses; however, the target here must be a protobuf.Message. -func (fd *FieldDescription) GetFrom(target interface{}) (interface{}, error) { - v, ok := target.(proto.Message) - if !ok { - return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target) - } - pbRef := v.ProtoReflect() - pbDesc := pbRef.Descriptor() - var fieldVal interface{} - if pbDesc == fd.desc.ContainingMessage() { - // When the target protobuf shares the same message descriptor instance as the field - // descriptor, use the cached field descriptor value. - fieldVal = pbRef.Get(fd.desc).Interface() - } else { - // Otherwise, fallback to a dynamic lookup of the field descriptor from the target - // instance as an attempt to use the cached field descriptor will result in a panic. - fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface() - } - switch fv := fieldVal.(type) { - // Fast-path return for primitive types. - case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List: - return fv, nil - case protoreflect.EnumNumber: - return int64(fv), nil - case protoreflect.Map: - // Return a wrapper around the protobuf-reflected Map types which carries additional - // information about the key and value definitions of the map. - return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil - case protoreflect.Message: - // Make sure to unwrap well-known protobuf types before returning. - unwrapped, _ := fd.MaybeUnwrapDynamic(fv) - return unwrapped, nil - default: - return fv, nil - } -} - -// IsEnum returns true if the field type refers to an enum value. -func (fd *FieldDescription) IsEnum() bool { - return fd.desc.Kind() == protoreflect.EnumKind -} - -// IsMap returns true if the field is of map type. -func (fd *FieldDescription) IsMap() bool { - return fd.desc.IsMap() -} - -// IsMessage returns true if the field is of message type. -func (fd *FieldDescription) IsMessage() bool { - return fd.desc.Kind() == protoreflect.MessageKind -} - -// IsOneof returns true if the field is declared within a oneof block. -func (fd *FieldDescription) IsOneof() bool { - return fd.desc.ContainingOneof() != nil -} - -// IsList returns true if the field is a repeated value. -// -// This method will also return true for map values, so check whether the -// field is also a map. -func (fd *FieldDescription) IsList() bool { - return fd.desc.IsList() -} - -// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the -// value can be unwrapped to a more primitive CEL type. -// -// This function returns the unwrapped value and 'true' on success, or the original value -// and 'false' otherwise. -func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (interface{}, bool) { - return unwrapDynamic(fd, msg) -} - -// Name returns the CamelCase name of the field within the proto-based struct. -func (fd *FieldDescription) Name() string { - return string(fd.desc.Name()) -} - -// ReflectType returns the Golang reflect.Type for this field. -func (fd *FieldDescription) ReflectType() reflect.Type { - return fd.reflectType -} - -// String returns the fully qualified name of the field within its type as well as whether the -// field occurs within a oneof. 
-func (fd *FieldDescription) String() string { - return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof()) -} - -// Zero returns the zero value for the protobuf message represented by this field. -// -// If the field is not a proto.Message type, the zero value is nil. -func (fd *FieldDescription) Zero() proto.Message { - return fd.zeroMsg -} - -func (fd *FieldDescription) typeDefToType() *exprpb.Type { - if fd.desc.Kind() == protoreflect.MessageKind { - msgType := string(fd.desc.Message().FullName()) - if wk, found := CheckedWellKnowns[msgType]; found { - return wk - } - return checkedMessageType(msgType) - } - if fd.desc.Kind() == protoreflect.EnumKind { - return checkedInt - } - return CheckedPrimitives[fd.desc.Kind()] -} - -// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in -// retrieving individual elements within CEL value data types. -type Map struct { - protoreflect.Map - KeyType *FieldDescription - ValueType *FieldDescription -} - -func checkedMessageType(name string) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_MessageType{MessageType: name}} -} - -func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_Primitive{Primitive: primitive}} -} - -func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}} -} - -func checkedWrap(t *exprpb.Type) *exprpb.Type { - return &exprpb.Type{ - TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}} -} - -// unwrap unwraps the provided proto.Message value, potentially based on the description if the -// input message is a *dynamicpb.Message which obscures the typing information from Go. -// -// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'. -func unwrap(desc description, msg proto.Message) (interface{}, bool) { - switch v := msg.(type) { - case *anypb.Any: - dynMsg, err := v.UnmarshalNew() - if err != nil { - return v, false - } - return unwrapDynamic(desc, dynMsg.ProtoReflect()) - case *dynamicpb.Message: - return unwrapDynamic(desc, v) - case *dpb.Duration: - return v.AsDuration(), true - case *tpb.Timestamp: - return v.AsTime(), true - case *structpb.Value: - switch v.GetKind().(type) { - case *structpb.Value_BoolValue: - return v.GetBoolValue(), true - case *structpb.Value_ListValue: - return v.GetListValue(), true - case *structpb.Value_NullValue: - return structpb.NullValue_NULL_VALUE, true - case *structpb.Value_NumberValue: - return v.GetNumberValue(), true - case *structpb.Value_StringValue: - return v.GetStringValue(), true - case *structpb.Value_StructValue: - return v.GetStructValue(), true - default: - return structpb.NullValue_NULL_VALUE, true - } - case *wrapperspb.BoolValue: - return v.GetValue(), true - case *wrapperspb.BytesValue: - return v.GetValue(), true - case *wrapperspb.DoubleValue: - return v.GetValue(), true - case *wrapperspb.FloatValue: - return float64(v.GetValue()), true - case *wrapperspb.Int32Value: - return int64(v.GetValue()), true - case *wrapperspb.Int64Value: - return v.GetValue(), true - case *wrapperspb.StringValue: - return v.GetValue(), true - case *wrapperspb.UInt32Value: - return uint64(v.GetValue()), true - case *wrapperspb.UInt64Value: - return v.GetValue(), true - } - return msg, false -} - -// unwrapDynamic unwraps a reflected protobuf Message value. 
-// -// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'. -func unwrapDynamic(desc description, refMsg protoreflect.Message) (interface{}, bool) { - msg := refMsg.Interface() - if !refMsg.IsValid() { - msg = desc.Zero() - } - // In order to ensure that these wrapped types match the expectations of the CEL type system - // the dynamicpb.Message must be merged with an protobuf instance of the well-known type value. - typeName := string(refMsg.Descriptor().FullName()) - switch typeName { - case "google.protobuf.Any": - // Note, Any values require further unwrapping; however, this unwrapping may or may not - // be to a well-known type. If the unwrapped value is a well-known type it will be further - // unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object - // represented by the Any will be returned. - unwrappedAny := &anypb.Any{} - proto.Merge(unwrappedAny, msg) - dynMsg, err := unwrappedAny.UnmarshalNew() - if err != nil { - // Allow the error to move further up the stack as it should result in an type - // conversion error if the caller does not recover it somehow. - return unwrappedAny, true - } - // Attempt to unwrap the dynamic type, otherwise return the dynamic message. - if unwrapped, nested := unwrapDynamic(desc, dynMsg.ProtoReflect()); nested { - return unwrapped, true - } - return dynMsg, true - case "google.protobuf.BoolValue", - "google.protobuf.BytesValue", - "google.protobuf.DoubleValue", - "google.protobuf.FloatValue", - "google.protobuf.Int32Value", - "google.protobuf.Int64Value", - "google.protobuf.StringValue", - "google.protobuf.UInt32Value", - "google.protobuf.UInt64Value": - // The msg value is ignored when dealing with wrapper types as they have a null or value - // behavior, rather than the standard zero value behavior of other proto message types. - if !refMsg.IsValid() { - return structpb.NullValue_NULL_VALUE, true - } - valueField := refMsg.Descriptor().Fields().ByName("value") - return refMsg.Get(valueField).Interface(), true - case "google.protobuf.Duration": - unwrapped := &dpb.Duration{} - proto.Merge(unwrapped, msg) - return unwrapped.AsDuration(), true - case "google.protobuf.ListValue": - unwrapped := &structpb.ListValue{} - proto.Merge(unwrapped, msg) - return unwrapped, true - case "google.protobuf.NullValue": - return structpb.NullValue_NULL_VALUE, true - case "google.protobuf.Struct": - unwrapped := &structpb.Struct{} - proto.Merge(unwrapped, msg) - return unwrapped, true - case "google.protobuf.Timestamp": - unwrapped := &tpb.Timestamp{} - proto.Merge(unwrapped, msg) - return unwrapped.AsTime(), true - case "google.protobuf.Value": - unwrapped := &structpb.Value{} - proto.Merge(unwrapped, msg) - return unwrap(desc, unwrapped) - } - return msg, false -} - -// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve -// well-known protobuf reflected types expected by the CEL type system. -func reflectTypeOf(val interface{}) reflect.Type { - switch v := val.(type) { - case proto.Message: - return reflect.TypeOf(zeroValueOf(v)) - default: - return reflect.TypeOf(v) - } -} - -// zeroValueOf will return the strongest possible proto.Message representing the default protobuf -// message value of the input msg type. 
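The net effect of unwrap and unwrapDynamic above is that MaybeUnwrap turns wrapper and well-known messages into plain Go values. A brief sketch of that behavior (the wrapper and duration values are illustrative only):

package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/common/types/pb"
	durpb "google.golang.org/protobuf/types/known/durationpb"
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	pbdb := pb.NewDb()

	// Wrapper messages unwrap to their scalar payload.
	intTd, _ := pbdb.DescribeType("google.protobuf.Int64Value")
	v, ok := intTd.MaybeUnwrap(wrapperspb.Int64(42))
	fmt.Println(v, ok) // 42 true

	// Duration and Timestamp unwrap to their time package equivalents.
	durTd, _ := pbdb.DescribeType("google.protobuf.Duration")
	d, _ := durTd.MaybeUnwrap(durpb.New(3 * time.Second))
	fmt.Println(d) // 3s
}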
-func zeroValueOf(msg proto.Message) proto.Message { - if msg == nil { - return nil - } - typeName := string(msg.ProtoReflect().Descriptor().FullName()) - zeroVal, found := zeroValueMap[typeName] - if found { - return zeroVal - } - return msg -} - -var ( - zeroValueMap = map[string]proto.Message{ - "google.protobuf.Any": &anypb.Any{}, - "google.protobuf.Duration": &dpb.Duration{}, - "google.protobuf.ListValue": &structpb.ListValue{}, - "google.protobuf.Struct": &structpb.Struct{}, - "google.protobuf.Timestamp": &tpb.Timestamp{}, - "google.protobuf.Value": &structpb.Value{}, - "google.protobuf.BoolValue": wrapperspb.Bool(false), - "google.protobuf.BytesValue": wrapperspb.Bytes([]byte{}), - "google.protobuf.DoubleValue": wrapperspb.Double(0.0), - "google.protobuf.FloatValue": wrapperspb.Float(0.0), - "google.protobuf.Int32Value": wrapperspb.Int32(0), - "google.protobuf.Int64Value": wrapperspb.Int64(0), - "google.protobuf.StringValue": wrapperspb.String(""), - "google.protobuf.UInt32Value": wrapperspb.UInt32(0), - "google.protobuf.UInt64Value": wrapperspb.UInt64(0), - } -) diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go deleted file mode 100644 index 62be9f00cf..0000000000 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - "time" - - "github.com/google/cel-go/common/types/pb" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - anypb "google.golang.org/protobuf/types/known/anypb" - dpb "google.golang.org/protobuf/types/known/durationpb" - structpb "google.golang.org/protobuf/types/known/structpb" - tpb "google.golang.org/protobuf/types/known/timestamppb" -) - -type protoTypeRegistry struct { - revTypeMap map[string]ref.Type - pbdb *pb.Db -} - -// NewRegistry accepts a list of proto message instances and returns a type -// provider which can create new instances of the provided message or any -// message that proto depends upon in its FileDescriptor. -func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) { - p := &protoTypeRegistry{ - revTypeMap: make(map[string]ref.Type), - pbdb: pb.NewDb(), - } - err := p.RegisterType( - BoolType, - BytesType, - DoubleType, - DurationType, - IntType, - ListType, - MapType, - NullType, - StringType, - TimestampType, - TypeType, - UintType) - if err != nil { - return nil, err - } - // This block ensures that the well-known protobuf types are registered by default. 
- for _, fd := range p.pbdb.FileDescriptions() { - err = p.registerAllTypes(fd) - if err != nil { - return nil, err - } - } - for _, msgType := range types { - err = p.RegisterMessage(msgType) - if err != nil { - return nil, err - } - } - return p, nil -} - -// NewEmptyRegistry returns a registry which is completely unconfigured. -func NewEmptyRegistry() ref.TypeRegistry { - return &protoTypeRegistry{ - revTypeMap: make(map[string]ref.Type), - pbdb: pb.NewDb(), - } -} - -// Copy implements the ref.TypeRegistry interface method which copies the current state of the -// registry into its own memory space. -func (p *protoTypeRegistry) Copy() ref.TypeRegistry { - copy := &protoTypeRegistry{ - revTypeMap: make(map[string]ref.Type), - pbdb: p.pbdb.Copy(), - } - for k, v := range p.revTypeMap { - copy.revTypeMap[k] = v - } - return copy -} - -func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val { - enumVal, found := p.pbdb.DescribeEnum(enumName) - if !found { - return NewErr("unknown enum name '%s'", enumName) - } - return Int(enumVal.Value()) -} - -func (p *protoTypeRegistry) FindFieldType(messageType string, - fieldName string) (*ref.FieldType, bool) { - msgType, found := p.pbdb.DescribeType(messageType) - if !found { - return nil, false - } - field, found := msgType.FieldByName(fieldName) - if !found { - return nil, false - } - return &ref.FieldType{ - Type: field.CheckedType(), - IsSet: field.IsSet, - GetFrom: field.GetFrom}, - true -} - -func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) { - if t, found := p.revTypeMap[identName]; found { - return t.(ref.Val), true - } - if enumVal, found := p.pbdb.DescribeEnum(identName); found { - return Int(enumVal.Value()), true - } - return nil, false -} - -func (p *protoTypeRegistry) FindType(typeName string) (*exprpb.Type, bool) { - if _, found := p.pbdb.DescribeType(typeName); !found { - return nil, false - } - if typeName != "" && typeName[0] == '.' { - typeName = typeName[1:] - } - return &exprpb.Type{ - TypeKind: &exprpb.Type_Type{ - Type: &exprpb.Type{ - TypeKind: &exprpb.Type_MessageType{ - MessageType: typeName}}}}, true -} - -func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) ref.Val { - td, found := p.pbdb.DescribeType(typeName) - if !found { - return NewErr("unknown type '%s'", typeName) - } - msg := td.New() - fieldMap := td.FieldMap() - for name, value := range fields { - field, found := fieldMap[name] - if !found { - return NewErr("no such field: %s", name) - } - err := msgSetField(msg, field, value) - if err != nil { - return &Err{err} - } - } - return p.NativeToValue(msg.Interface()) -} - -func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error { - fd, err := p.pbdb.RegisterDescriptor(fileDesc) - if err != nil { - return err - } - return p.registerAllTypes(fd) -} - -func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error { - fd, err := p.pbdb.RegisterMessage(message) - if err != nil { - return err - } - return p.registerAllTypes(fd) -} - -func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error { - for _, t := range types { - p.revTypeMap[t.TypeName()] = t - } - // TODO: generate an error when the type name is registered more than once. - return nil -} - -// NativeToValue converts various "native" types to ref.Val with this specific implementation -// providing support for custom proto-based types. -// -// This method should be the inverse of ref.Val.ConvertToNative. 
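Putting the registry methods above together, a hedged sketch of typical checker-facing usage; the Timestamp message and field name are illustrative, not special-cased:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	tspb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Seeded with CEL's simple types plus any messages passed in.
	reg, err := types.NewRegistry(&tspb.Timestamp{})
	if err != nil {
		panic(err)
	}
	// FindType answers "does this message type exist" for the checker;
	// FindFieldType describes a single field for selection and presence tests.
	_, typeFound := reg.FindType("google.protobuf.Timestamp")
	ft, fieldFound := reg.FindFieldType("google.protobuf.Timestamp", "seconds")
	fmt.Println(typeFound, fieldFound, ft.Type != nil) // true true true
}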
-func (p *protoTypeRegistry) NativeToValue(value interface{}) ref.Val { - if val, found := nativeToValue(p, value); found { - return val - } - switch v := value.(type) { - case proto.Message: - typeName := string(v.ProtoReflect().Descriptor().FullName()) - td, found := p.pbdb.DescribeType(typeName) - if !found { - return NewErr("unknown type: '%s'", typeName) - } - unwrapped, isUnwrapped := td.MaybeUnwrap(v) - if isUnwrapped { - return p.NativeToValue(unwrapped) - } - typeVal, found := p.FindIdent(typeName) - if !found { - return NewErr("unknown type: '%s'", typeName) - } - return NewObject(p, td, typeVal.(*TypeValue), v) - case *pb.Map: - return NewProtoMap(p, v) - case protoreflect.List: - return NewProtoList(p, v) - case protoreflect.Message: - return p.NativeToValue(v.Interface()) - case protoreflect.Value: - return p.NativeToValue(v.Interface()) - } - return UnsupportedRefValConversionErr(value) -} - -func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error { - for _, typeName := range fd.GetTypeNames() { - err := p.RegisterType(NewObjectTypeValue(typeName)) - if err != nil { - return err - } - } - return nil -} - -// defaultTypeAdapter converts go native types to CEL values. -type defaultTypeAdapter struct{} - -var ( - // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values. - DefaultTypeAdapter = &defaultTypeAdapter{} -) - -// NativeToValue implements the ref.TypeAdapter interface. -func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val { - if val, found := nativeToValue(a, value); found { - return val - } - return UnsupportedRefValConversionErr(value) -} - -// nativeToValue returns the converted (ref.Val, true) of a conversion is found, -// otherwise (nil, false) -func nativeToValue(a ref.TypeAdapter, value interface{}) (ref.Val, bool) { - switch v := value.(type) { - case nil: - return NullValue, true - case *Bool: - if v != nil { - return *v, true - } - case *Bytes: - if v != nil { - return *v, true - } - case *Double: - if v != nil { - return *v, true - } - case *Int: - if v != nil { - return *v, true - } - case *String: - if v != nil { - return *v, true - } - case *Uint: - if v != nil { - return *v, true - } - case bool: - return Bool(v), true - case int: - return Int(v), true - case int32: - return Int(v), true - case int64: - return Int(v), true - case uint: - return Uint(v), true - case uint32: - return Uint(v), true - case uint64: - return Uint(v), true - case float32: - return Double(v), true - case float64: - return Double(v), true - case string: - return String(v), true - case *dpb.Duration: - return Duration{Duration: v.AsDuration()}, true - case time.Duration: - return Duration{Duration: v}, true - case *tpb.Timestamp: - return Timestamp{Time: v.AsTime()}, true - case time.Time: - return Timestamp{Time: v}, true - case *bool: - if v != nil { - return Bool(*v), true - } - case *float32: - if v != nil { - return Double(*v), true - } - case *float64: - if v != nil { - return Double(*v), true - } - case *int: - if v != nil { - return Int(*v), true - } - case *int32: - if v != nil { - return Int(*v), true - } - case *int64: - if v != nil { - return Int(*v), true - } - case *string: - if v != nil { - return String(*v), true - } - case *uint: - if v != nil { - return Uint(*v), true - } - case *uint32: - if v != nil { - return Uint(*v), true - } - case *uint64: - if v != nil { - return Uint(*v), true - } - case []byte: - return Bytes(v), true - // specializations for common lists types. 
- case []string: - return NewStringList(a, v), true - case []ref.Val: - return NewRefValList(a, v), true - // specializations for common map types. - case map[string]string: - return NewStringStringMap(a, v), true - case map[string]interface{}: - return NewStringInterfaceMap(a, v), true - case map[ref.Val]ref.Val: - return NewRefValMap(a, v), true - // additional specializations may be added upon request / need. - case *anypb.Any: - if v == nil { - return UnsupportedRefValConversionErr(v), true - } - unpackedAny, err := v.UnmarshalNew() - if err != nil { - return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true - } - return a.NativeToValue(unpackedAny), true - case *structpb.NullValue, structpb.NullValue: - return NullValue, true - case *structpb.ListValue: - return NewJSONList(a, v), true - case *structpb.Struct: - return NewJSONStruct(a, v), true - case ref.Val: - return v, true - case protoreflect.EnumNumber: - return Int(v), true - case proto.Message: - if v == nil { - return UnsupportedRefValConversionErr(v), true - } - typeName := string(v.ProtoReflect().Descriptor().FullName()) - td, found := pb.DefaultDb.DescribeType(typeName) - if !found { - return nil, false - } - val, unwrapped := td.MaybeUnwrap(v) - if !unwrapped { - return nil, false - } - return a.NativeToValue(val), true - // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces - // which means that this case must appear after handling a proto.Message type. - case protoreflect.Message: - return a.NativeToValue(v.Interface()), true - default: - refValue := reflect.ValueOf(v) - if refValue.Kind() == reflect.Ptr { - if refValue.IsNil() { - return UnsupportedRefValConversionErr(v), true - } - refValue = refValue.Elem() - } - refKind := refValue.Kind() - switch refKind { - case reflect.Array, reflect.Slice: - return NewDynamicList(a, v), true - case reflect.Map: - return NewDynamicMap(a, v), true - // type aliases of primitive types cannot be asserted as that type, but rather need - // to be downcast to int32 before being converted to a CEL representation. 
- case reflect.Int32: - intType := reflect.TypeOf(int32(0)) - return Int(refValue.Convert(intType).Interface().(int32)), true - case reflect.Int64: - intType := reflect.TypeOf(int64(0)) - return Int(refValue.Convert(intType).Interface().(int64)), true - case reflect.Uint32: - uintType := reflect.TypeOf(uint32(0)) - return Uint(refValue.Convert(uintType).Interface().(uint32)), true - case reflect.Uint64: - uintType := reflect.TypeOf(uint64(0)) - return Uint(refValue.Convert(uintType).Interface().(uint64)), true - case reflect.Float32: - doubleType := reflect.TypeOf(float32(0)) - return Double(refValue.Convert(doubleType).Interface().(float32)), true - case reflect.Float64: - doubleType := reflect.TypeOf(float64(0)) - return Double(refValue.Convert(doubleType).Interface().(float64)), true - } - } - return nil, false -} - -func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error { - if field.IsList() { - lv := target.NewField(field.Descriptor()) - list, ok := val.(traits.Lister) - if !ok { - return unsupportedTypeConversionError(field, val) - } - err := msgSetListField(lv.List(), field, list) - if err != nil { - return err - } - target.Set(field.Descriptor(), lv) - return nil - } - if field.IsMap() { - mv := target.NewField(field.Descriptor()) - mp, ok := val.(traits.Mapper) - if !ok { - return unsupportedTypeConversionError(field, val) - } - err := msgSetMapField(mv.Map(), field, mp) - if err != nil { - return err - } - target.Set(field.Descriptor(), mv) - return nil - } - v, err := val.ConvertToNative(field.ReflectType()) - if err != nil { - return fieldTypeConversionError(field, err) - } - switch v.(type) { - case proto.Message: - v = v.(proto.Message).ProtoReflect() - } - target.Set(field.Descriptor(), protoreflect.ValueOf(v)) - return nil -} - -func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error { - elemReflectType := listField.ReflectType().Elem() - for i := Int(0); i < listVal.Size().(Int); i++ { - elem := listVal.Get(i) - elemVal, err := elem.ConvertToNative(elemReflectType) - if err != nil { - return fieldTypeConversionError(listField, err) - } - switch ev := elemVal.(type) { - case proto.Message: - elemVal = ev.ProtoReflect() - } - target.Append(protoreflect.ValueOf(elemVal)) - } - return nil -} - -func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error { - targetKeyType := mapField.KeyType.ReflectType() - targetValType := mapField.ValueType.ReflectType() - it := mapVal.Iterator() - for it.HasNext() == True { - key := it.Next() - val := mapVal.Get(key) - k, err := key.ConvertToNative(targetKeyType) - if err != nil { - return fieldTypeConversionError(mapField, err) - } - v, err := val.ConvertToNative(targetValType) - if err != nil { - return fieldTypeConversionError(mapField, err) - } - switch v.(type) { - case proto.Message: - v = v.(proto.Message).ProtoReflect() - } - target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v)) - } - return nil -} - -func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error { - msgName := field.Descriptor().ContainingMessage().FullName() - return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type()) -} - -func fieldTypeConversionError(field *pb.FieldDescription, err error) error { - msgName := field.Descriptor().ContainingMessage().FullName() - return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err) 
-} diff --git a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel deleted file mode 100644 index 9389256ad2..0000000000 --- a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "provider.go", - "reference.go", - ], - importpath = "github.com/google/cel-go/common/types/ref", - deps = [ - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go deleted file mode 100644 index 91a711fa70..0000000000 --- a/vendor/github.com/google/cel-go/common/types/ref/provider.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ref - -import ( - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// TypeProvider specifies functions for creating new object instances and for -// resolving enum values by name. -type TypeProvider interface { - // EnumValue returns the numeric value of the given enum value name. - EnumValue(enumName string) Val - - // FindIdent takes a qualified identifier name and returns a Value if one - // exists. - FindIdent(identName string) (Val, bool) - - // FindType looks up the Type given a qualified typeName. Returns false - // if not found. - // - // Used during type-checking only. - FindType(typeName string) (*exprpb.Type, bool) - - // FieldFieldType returns the field type for a checked type value. Returns - // false if the field could not be found. - // - // Used during type-checking only. - FindFieldType(messageType string, fieldName string) (*FieldType, bool) - - // NewValue creates a new type value from a qualified name and map of field - // name to value. - // - // Note, for each value, the Val.ConvertToNative function will be invoked - // to convert the Val to the field's native type. If an error occurs during - // conversion, the NewValue will be a types.Err. - NewValue(typeName string, fields map[string]Val) Val -} - -// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values. -type TypeAdapter interface { - // NativeToValue converts the input `value` to a CEL `ref.Val`. - NativeToValue(value interface{}) Val -} - -// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider` -// implementations support type-customization, so these features are optional. 
However, a -// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types -// which are registered can be converted to CEL representations. -type TypeRegistry interface { - TypeAdapter - TypeProvider - - // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`. - RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error - - // RegisterMessage registers a protocol buffer message and its dependencies. - RegisterMessage(message proto.Message) error - - // RegisterType registers a type value with the provider which ensures the - // provider is aware of how to map the type to an identifier. - // - // If a type is provided more than once with an alternative definition, the - // call will result in an error. - RegisterType(types ...Type) error - - // Copy the TypeRegistry and return a new registry whose mutable state is isolated. - Copy() TypeRegistry -} - -// FieldType represents a field's type value and whether that field supports -// presence detection. -type FieldType struct { - // Type of the field. - Type *exprpb.Type - - // IsSet indicates whether the field is set on an input object. - IsSet FieldTester - - // GetFrom retrieves the field value on the input object, if set. - GetFrom FieldGetter -} - -// FieldTester is used to test field presence on an input object. -type FieldTester func(target interface{}) bool - -// FieldGetter is used to get the field value from an input object, if set. -type FieldGetter func(target interface{}) (interface{}, error) diff --git a/vendor/github.com/google/cel-go/common/types/ref/reference.go b/vendor/github.com/google/cel-go/common/types/ref/reference.go deleted file mode 100644 index e12169988d..0000000000 --- a/vendor/github.com/google/cel-go/common/types/ref/reference.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ref contains the reference interfaces used throughout the types -// components. -package ref - -import ( - "reflect" -) - -// Type interface indicate the name of a given type. -type Type interface { - // HasTrait returns whether the type has a given trait associated with it. - // - // See common/types/traits/traits.go for a list of supported traits. - HasTrait(trait int) bool - - // TypeName returns the qualified type name of the type. - // - // The type name is also used as the type's identifier name at type-check - // and interpretation time. - TypeName() string -} - -// Val interface defines the functions supported by all expression values. -// Val implementations may specialize the behavior of the value through the -// addition of traits. -type Val interface { - // ConvertToNative converts the Value to a native Go struct according to the - // reflected type description, or error if the conversion is not feasible. - ConvertToNative(typeDesc reflect.Type) (interface{}, error) - - // ConvertToType supports type conversions between value types supported by - // the expression language. 
- ConvertToType(typeValue Type) Val - - // Equal returns true if the `other` value has the same type and content as - // the implementing struct. - Equal(other Val) Val - - // Type returns the TypeValue of the value. - Type() Type - - // Value returns the raw value of the instance which may not be directly - // compatible with the expression language types. - Value() interface{} -} diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go deleted file mode 100644 index 792c1781ff..0000000000 --- a/vendor/github.com/google/cel-go/common/types/string.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// String type implementation which supports addition, comparison, matching, -// and size functions. -type String string - -var ( - // StringType singleton. - StringType = NewTypeValue("string", - traits.AdderType, - traits.ComparerType, - traits.MatcherType, - traits.ReceiverType, - traits.SizerType) - - stringOneArgOverloads = map[string]func(String, ref.Val) ref.Val{ - overloads.Contains: stringContains, - overloads.EndsWith: stringEndsWith, - overloads.StartsWith: stringStartsWith, - } - - stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{}) -) - -// Add implements traits.Adder.Add. -func (s String) Add(other ref.Val) ref.Val { - otherString, ok := other.(String) - if !ok { - return ValOrErr(other, "no such overload") - } - return s + otherString -} - -// Compare implements traits.Comparer.Compare. -func (s String) Compare(other ref.Val) ref.Val { - otherString, ok := other.(String) - if !ok { - return ValOrErr(other, "no such overload") - } - return Int(strings.Compare(s.Value().(string), otherString.Value().(string))) -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (s String) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.String: - if reflect.TypeOf(s).AssignableTo(typeDesc) { - return s, nil - } - return s.Value(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Primitives must be wrapped before being set on an Any field. - return anypb.New(wrapperspb.String(string(s))) - case jsonValueType: - // Convert to a protobuf representation of a JSON String. - return structpb.NewStringValue(string(s)), nil - case stringWrapperType: - // Convert to a wrapperspb.StringValue. 
- return wrapperspb.String(string(s)), nil - } - if typeDesc.Elem().Kind() == reflect.String { - p := s.Value().(string) - return &p, nil - } - case reflect.Interface: - sv := s.Value() - if reflect.TypeOf(sv).Implements(typeDesc) { - return sv, nil - } - if reflect.TypeOf(s).Implements(typeDesc) { - return s, nil - } - } - return nil, fmt.Errorf( - "unsupported native conversion from string to '%v'", typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (s String) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case IntType: - if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil { - return Int(n) - } - case UintType: - if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil { - return Uint(n) - } - case DoubleType: - if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil { - return Double(n) - } - case BoolType: - if b, err := strconv.ParseBool(s.Value().(string)); err == nil { - return Bool(b) - } - case BytesType: - return Bytes(s) - case DurationType: - if d, err := time.ParseDuration(s.Value().(string)); err == nil { - return Duration{Duration: d} - } - case TimestampType: - if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil { - return Timestamp{Time: t} - } - case StringType: - return s - case TypeType: - return StringType - } - return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal) -} - -// Equal implements ref.Val.Equal. -func (s String) Equal(other ref.Val) ref.Val { - otherString, ok := other.(String) - if !ok { - return ValOrErr(other, "no such overload") - } - return Bool(s == otherString) -} - -// Match implements traits.Matcher.Match. -func (s String) Match(pattern ref.Val) ref.Val { - pat, ok := pattern.(String) - if !ok { - return ValOrErr(pattern, "no such overload") - } - matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string)) - if err != nil { - return &Err{err} - } - return Bool(matched) -} - -// Receive implements traits.Reciever.Receive. -func (s String) Receive(function string, overload string, args []ref.Val) ref.Val { - switch len(args) { - case 1: - if f, found := stringOneArgOverloads[function]; found { - return f(s, args[0]) - } - } - return NewErr("no such overload") -} - -// Size implements traits.Sizer.Size. -func (s String) Size() ref.Val { - return Int(len([]rune(s.Value().(string)))) -} - -// Type implements ref.Val.Type. -func (s String) Type() ref.Type { - return StringType -} - -// Value implements ref.Val.Value. 
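The String conversions above parse rather than cast, and Size is rune-based. A short sketch of the observable behavior (example literals are arbitrary):

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	// ConvertToType parses rather than casts: "123" becomes Int(123).
	fmt.Println(types.String("123").ConvertToType(types.IntType)) // 123
	// Size counts runes, not bytes.
	fmt.Println(types.String("héllo").Size()) // 5
	// Match applies Go (RE2) regular expression semantics.
	fmt.Println(types.String("tacocat").Match(types.String("^taco"))) // true
}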
-func (s String) Value() interface{} { - return string(s) -} - -func stringContains(s String, sub ref.Val) ref.Val { - subStr, ok := sub.(String) - if !ok { - return ValOrErr(sub, "no such overload") - } - return Bool(strings.Contains(string(s), string(subStr))) -} - -func stringEndsWith(s String, suf ref.Val) ref.Val { - sufStr, ok := suf.(String) - if !ok { - return ValOrErr(suf, "no such overload") - } - return Bool(strings.HasSuffix(string(s), string(sufStr))) -} - -func stringStartsWith(s String, pre ref.Val) ref.Val { - preStr, ok := pre.(String) - if !ok { - return ValOrErr(pre, "no such overload") - } - return Bool(strings.HasPrefix(string(s), string(preStr))) -} diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go deleted file mode 100644 index a2e6e1ab74..0000000000 --- a/vendor/github.com/google/cel-go/common/types/timestamp.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - tpb "google.golang.org/protobuf/types/known/timestamppb" -) - -// Timestamp type implementation which supports add, compare, and subtract -// operations. Timestamps are also capable of participating in dynamic -// function dispatch to instance methods. -type Timestamp struct { - time.Time -} - -var ( - // TimestampType singleton. - TimestampType = NewTypeValue("google.protobuf.Timestamp", - traits.AdderType, - traits.ComparerType, - traits.ReceiverType, - traits.SubtractorType) -) - -// Add implements traits.Adder.Add. -func (t Timestamp) Add(other ref.Val) ref.Val { - switch other.Type() { - case DurationType: - return other.(Duration).Add(t) - } - return ValOrErr(other, "no such overload") -} - -// Compare implements traits.Comparer.Compare. -func (t Timestamp) Compare(other ref.Val) ref.Val { - if TimestampType != other.Type() { - return ValOrErr(other, "no such overload") - } - ts1 := t.Time - ts2 := other.(Timestamp).Time - ts := ts1.Sub(ts2) - if ts < 0 { - return IntNegOne - } - if ts > 0 { - return IntOne - } - return IntZero -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - // If the timestamp is already assignable to the desired type return it. - if reflect.TypeOf(t.Time).AssignableTo(typeDesc) { - return t.Time, nil - } - if reflect.TypeOf(t).AssignableTo(typeDesc) { - return t, nil - } - switch typeDesc { - case anyValueType: - // Pack the underlying time as a tpb.Timestamp into an Any value. 
- return anypb.New(tpb.New(t.Time)) - case jsonValueType: - // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON - // string. - v := t.ConvertToType(StringType) - if IsError(v) { - return nil, v.(*Err) - } - return structpb.NewStringValue(string(v.(String))), nil - case timestampValueType: - // Unwrap the underlying tpb.Timestamp. - return tpb.New(t.Time), nil - } - return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case StringType: - return String(t.Format(time.RFC3339)) - case IntType: - // Return the Unix time in seconds since 1970 - return Int(t.Unix()) - case TimestampType: - return t - case TypeType: - return TimestampType - } - return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal) -} - -// Equal implements ref.Val.Equal. -func (t Timestamp) Equal(other ref.Val) ref.Val { - if TimestampType != other.Type() { - return MaybeNoSuchOverloadErr(other) - } - return Bool(t.Time.Equal(other.(Timestamp).Time)) -} - -// Receive implements traits.Reciever.Receive. -func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val { - switch len(args) { - case 0: - if f, found := timestampZeroArgOverloads[function]; found { - return f(t.Time) - } - case 1: - if f, found := timestampOneArgOverloads[function]; found { - return f(t.Time, args[0]) - } - } - return NewErr("no such overload") -} - -// Subtract implements traits.Subtractor.Subtract. -func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val { - switch subtrahend.Type() { - case DurationType: - dur := subtrahend.(Duration) - return Timestamp{Time: t.Time.Add(-dur.Duration)} - case TimestampType: - t2 := subtrahend.(Timestamp).Time - return Duration{Duration: t.Time.Sub(t2)} - } - return ValOrErr(subtrahend, "no such overload") -} - -// Type implements ref.Val.Type. -func (t Timestamp) Type() ref.Type { - return TimestampType -} - -// Value implements ref.Val.Value. 
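Timestamp arithmetic above dispatches on the operand type: subtracting a Timestamp yields a Duration, while subtracting a Duration yields a Timestamp. A compact sketch with arbitrary Unix times:

package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/common/types"
)

func main() {
	t1 := types.Timestamp{Time: time.Unix(1600000000, 0).UTC()}
	t2 := types.Timestamp{Time: time.Unix(1600000060, 0).UTC()}

	// Timestamp - Timestamp yields a Duration value.
	fmt.Println(t2.Subtract(t1)) // 1m0s
	// Conversion to int is the Unix time in seconds.
	fmt.Println(t1.ConvertToType(types.IntType)) // 1600000000
}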
-func (t Timestamp) Value() interface{} { - return t.Time -} - -var ( - timestampValueType = reflect.TypeOf(&tpb.Timestamp{}) - - timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{ - overloads.TimeGetFullYear: timestampGetFullYear, - overloads.TimeGetMonth: timestampGetMonth, - overloads.TimeGetDayOfYear: timestampGetDayOfYear, - overloads.TimeGetDate: timestampGetDayOfMonthOneBased, - overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased, - overloads.TimeGetDayOfWeek: timestampGetDayOfWeek, - overloads.TimeGetHours: timestampGetHours, - overloads.TimeGetMinutes: timestampGetMinutes, - overloads.TimeGetSeconds: timestampGetSeconds, - overloads.TimeGetMilliseconds: timestampGetMilliseconds} - - timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{ - overloads.TimeGetFullYear: timestampGetFullYearWithTz, - overloads.TimeGetMonth: timestampGetMonthWithTz, - overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz, - overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz, - overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz, - overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz, - overloads.TimeGetHours: timestampGetHoursWithTz, - overloads.TimeGetMinutes: timestampGetMinutesWithTz, - overloads.TimeGetSeconds: timestampGetSecondsWithTz, - overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz} -) - -type timestampVisitor func(time.Time) ref.Val - -func timestampGetFullYear(t time.Time) ref.Val { - return Int(t.Year()) -} -func timestampGetMonth(t time.Time) ref.Val { - // CEL spec indicates that the month should be 0-based, but the Time value - // for Month() is 1-based. - return Int(t.Month() - 1) -} -func timestampGetDayOfYear(t time.Time) ref.Val { - return Int(t.YearDay() - 1) -} -func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val { - return Int(t.Day() - 1) -} -func timestampGetDayOfMonthOneBased(t time.Time) ref.Val { - return Int(t.Day()) -} -func timestampGetDayOfWeek(t time.Time) ref.Val { - return Int(t.Weekday()) -} -func timestampGetHours(t time.Time) ref.Val { - return Int(t.Hour()) -} -func timestampGetMinutes(t time.Time) ref.Val { - return Int(t.Minute()) -} -func timestampGetSeconds(t time.Time) ref.Val { - return Int(t.Second()) -} -func timestampGetMilliseconds(t time.Time) ref.Val { - return Int(t.Nanosecond() / 1000000) -} - -func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetFullYear)(t) -} -func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetMonth)(t) -} -func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetDayOfYear)(t) -} -func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetDayOfMonthZeroBased)(t) -} -func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetDayOfMonthOneBased)(t) -} -func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetDayOfWeek)(t) -} -func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetHours)(t) -} -func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetMinutes)(t) -} -func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, timestampGetSeconds)(t) -} -func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val { - return timeZone(tz, 
timestampGetMilliseconds)(t) -} - -func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor { - return func(t time.Time) ref.Val { - if StringType != tz.Type() { - return ValOrErr(tz, "no such overload") - } - val := string(tz.(String)) - ind := strings.Index(val, ":") - if ind == -1 { - loc, err := time.LoadLocation(val) - if err != nil { - return &Err{err} - } - return visitor(t.In(loc)) - } - - // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC - // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes. - hr, err := strconv.Atoi(string(val[0:ind])) - if err != nil { - return &Err{err} - } - min, err := strconv.Atoi(string(val[ind+1])) - if err != nil { - return &Err{err} - } - var offset int - if string(val[0]) == "-" { - offset = hr*60 - min - } else { - offset = hr*60 + min - } - secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds()) - timezone := time.FixedZone("", secondsEastOfUTC) - return visitor(t.In(timezone)) - } -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel deleted file mode 100644 index 86e54af61a..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "comparer.go", - "container.go", - "field_tester.go", - "indexer.go", - "iterator.go", - "lister.go", - "mapper.go", - "matcher.go", - "math.go", - "receiver.go", - "sizer.go", - "traits.go", - ], - importpath = "github.com/google/cel-go/common/types/traits", - deps = [ - "//common/types/ref:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/vendor/github.com/google/cel-go/common/types/traits/comparer.go deleted file mode 100644 index b531d9ae2b..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/comparer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// Comparer interface for ordering comparisons between values in order to -// support '<', '<=', '>=', '>' overloads. -type Comparer interface { - // Compare this value to the input other value, returning an Int: - // - // this < other -> Int(-1) - // this == other -> Int(0) - // this > other -> Int(1) - // - // If the comparison cannot be made or is not supported, an error should - // be returned. 
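The timeZone helper deleted just above accepts either an IANA zone name (for example 'US/Central') or a (+|-)HH:MM offset. Below is a standalone sketch of that branch logic; note that the deleted copy parses only the first minutes digit (val[ind+1]), while this sketch reads the whole minutes field described by the documented format.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// toLocation mirrors the offset handling of the deleted timeZone helper.
func toLocation(val string) (*time.Location, error) {
	ind := strings.Index(val, ":")
	if ind == -1 {
		// Named zone, e.g. "US/Central".
		return time.LoadLocation(val)
	}
	hr, err := strconv.Atoi(val[:ind])
	if err != nil {
		return nil, err
	}
	min, err := strconv.Atoi(val[ind+1:])
	if err != nil {
		return nil, err
	}
	offset := hr*60 + min
	if strings.HasPrefix(val, "-") {
		offset = hr*60 - min
	}
	return time.FixedZone("", int((time.Duration(offset)*time.Minute).Seconds())), nil
}

func main() {
	loc, err := toLocation("-08:00")
	fmt.Println(time.Date(2021, 3, 1, 12, 0, 0, 0, time.UTC).In(loc), err)
}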
- Compare(other ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/container.go b/vendor/github.com/google/cel-go/common/types/traits/container.go deleted file mode 100644 index cf5c621ae9..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/container.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import "github.com/google/cel-go/common/types/ref" - -// Container interface which permits containment tests such as 'a in b'. -type Container interface { - // Contains returns true if the value exists within the object. - Contains(value ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go deleted file mode 100644 index 816a956523..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/field_tester.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// FieldTester indicates if a defined field on an object type is set to a -// non-default value. -// -// For use with the `has()` macro. -type FieldTester interface { - // IsSet returns true if the field is defined and set to a non-default - // value. The method will return false if defined and not set, and an error - // if the field is not defined. - IsSet(field ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/vendor/github.com/google/cel-go/common/types/traits/indexer.go deleted file mode 100644 index 662c6836c3..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/indexer.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package traits - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// Indexer permits random access of elements by index 'a[b()]'. -type Indexer interface { - // Get the value at the specified index or error. - Get(index ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go deleted file mode 100644 index 42dd371aa4..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/iterator.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// Iterable aggregate types permit traversal over their elements. -type Iterable interface { - // Iterator returns a new iterator view of the struct. - Iterator() Iterator -} - -// Iterator permits safe traversal over the contents of an aggregate type. -type Iterator interface { - ref.Val - - // HasNext returns true if there are unvisited elements in the Iterator. - HasNext() ref.Val - - // Next returns the next element. - Next() ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go deleted file mode 100644 index 7b05c301d5..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/lister.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import "github.com/google/cel-go/common/types/ref" - -// Lister interface which aggregates the traits of a list. -type Lister interface { - ref.Val - Adder - Container - Indexer - Iterable - Sizer -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go deleted file mode 100644 index 2f7c919a8b..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/mapper.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
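The Indexer, Iterable, Iterator and Lister traits deleted above compose into CEL's list values. A short sketch iterating a dynamic list built from a native Go slice; types.NewDynamicList and types.DefaultTypeAdapter are assumed from the same types package, outside this hunk:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	lst := types.NewDynamicList(types.DefaultTypeAdapter, []string{"build", "test", "deploy"})

	// Lister embeds Indexer, Iterable and Sizer.
	fmt.Println(lst.Size())            // 3
	fmt.Println(lst.Get(types.Int(1))) // test

	it := lst.Iterator()
	for it.HasNext() == types.True {
		fmt.Println(it.Next())
	}
}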
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import "github.com/google/cel-go/common/types/ref" - -// Mapper interface which aggregates the traits of a maps. -type Mapper interface { - ref.Val - Container - Indexer - Iterable - Sizer - - // Find returns a value, if one exists, for the input key. - // - // If the key is not found the function returns (nil, false). - // If the input key is not valid for the map, or is Err or Unknown the function returns - // (Unknown|Err, false). - Find(key ref.Val) (ref.Val, bool) -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/vendor/github.com/google/cel-go/common/types/traits/matcher.go deleted file mode 100644 index 085dc94ff4..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/matcher.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import "github.com/google/cel-go/common/types/ref" - -// Matcher interface for supporting 'matches()' overloads. -type Matcher interface { - // Match returns true if the pattern matches the current value. - Match(pattern ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/math.go b/vendor/github.com/google/cel-go/common/types/traits/math.go deleted file mode 100644 index 86d5b9137e..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/math.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import "github.com/google/cel-go/common/types/ref" - -// Adder interface to support '+' operator overloads. -type Adder interface { - // Add returns a combination of the current value and other value. - // - // If the other value is an unsupported type, an error is returned. - Add(other ref.Val) ref.Val -} - -// Divider interface to support '/' operator overloads. -type Divider interface { - // Divide returns the result of dividing the current value by the input - // denominator. - // - // A denominator value of zero results in an error. 
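The Mapper trait deleted above documents Find's (value, found) contract. A sketch against a dynamic map built from a native Go map; types.NewDynamicMap is assumed from the same types package, outside this hunk:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	m := types.NewDynamicMap(types.DefaultTypeAdapter, map[string]string{
		"pipeline": "tekton-pipelines",
	})

	if v, found := m.Find(types.String("pipeline")); found {
		fmt.Println(v) // tekton-pipelines
	}
	// Missing keys follow the documented contract and return (nil, false).
	_, found := m.Find(types.String("trigger"))
	fmt.Println(found) // false
}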
- Divide(denominator ref.Val) ref.Val -} - -// Modder interface to support '%' operator overloads. -type Modder interface { - // Modulo returns the result of taking the modulus of the current value - // by the denominator. - // - // A denominator value of zero results in an error. - Modulo(denominator ref.Val) ref.Val -} - -// Multiplier interface to support '*' operator overloads. -type Multiplier interface { - // Multiply returns the result of multiplying the current and input value. - Multiply(other ref.Val) ref.Val -} - -// Negater interface to support unary '-' and '!' operator overloads. -type Negater interface { - // Negate returns the complement of the current value. - Negate() ref.Val -} - -// Subtractor interface to support binary '-' operator overloads. -type Subtractor interface { - // Subtract returns the result of subtracting the input from the current - // value. - Subtract(subtrahend ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/vendor/github.com/google/cel-go/common/types/traits/receiver.go deleted file mode 100644 index 8f41db45e8..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/receiver.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import "github.com/google/cel-go/common/types/ref" - -// Receiver interface for routing instance method calls within a value. -type Receiver interface { - // Receive accepts a function name, overload id, and arguments and returns - // a value. - Receive(function string, overload string, args []ref.Val) ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/vendor/github.com/google/cel-go/common/types/traits/sizer.go deleted file mode 100644 index b80d25137a..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/sizer.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traits - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// Sizer interface for supporting 'size()' overloads. -type Sizer interface { - // Size returns the number of elements or length of the value. 
- Size() ref.Val -} diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go deleted file mode 100644 index 6da3e6a3e1..0000000000 --- a/vendor/github.com/google/cel-go/common/types/traits/traits.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package traits defines interfaces that a type may implement to participate -// in operator overloads and function dispatch. -package traits - -const ( - // AdderType types provide a '+' operator overload. - AdderType = 1 << iota - - // ComparerType types support ordering comparisons '<', '<=', '>', '>='. - ComparerType - - // ContainerType types support 'in' operations. - ContainerType - - // DividerType types support '/' operations. - DividerType - - // FieldTesterType types support the detection of field value presence. - FieldTesterType - - // IndexerType types support index access with dynamic values. - IndexerType - - // IterableType types can be iterated over in comprehensions. - IterableType - - // IteratorType types support iterator semantics. - IteratorType - - // MatcherType types support pattern matching via 'matches' method. - MatcherType - - // ModderType types support modulus operations '%' - ModderType - - // MultiplierType types support '*' operations. - MultiplierType - - // NegatorType types support either negation via '!' or '-' - NegatorType - - // ReceiverType types support dynamic dispatch to instance methods. - ReceiverType - - // SizerType types support the size() method. - SizerType - - // SubtractorType type support '-' operations. - SubtractorType -) diff --git a/vendor/github.com/google/cel-go/common/types/type.go b/vendor/github.com/google/cel-go/common/types/type.go deleted file mode 100644 index a7253270a9..0000000000 --- a/vendor/github.com/google/cel-go/common/types/type.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -var ( - // TypeType is the type of a TypeValue. - TypeType = NewTypeValue("type") -) - -// TypeValue is an instance of a Value that describes a value's type. -type TypeValue struct { - name string - traitMask int -} - -// NewTypeValue returns *TypeValue which is both a ref.Type and ref.Val. 
-func NewTypeValue(name string, traits ...int) *TypeValue { - traitMask := 0 - for _, trait := range traits { - traitMask |= trait - } - return &TypeValue{ - name: name, - traitMask: traitMask} -} - -// NewObjectTypeValue returns a *TypeValue based on the input name, which is -// annotated with the traits relevant to all objects. -func NewObjectTypeValue(name string) *TypeValue { - return NewTypeValue(name, - traits.FieldTesterType, - traits.IndexerType) -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - // TODO: replace the internal type representation with a proto-value. - return nil, fmt.Errorf("type conversion not supported for 'type'") -} - -// ConvertToType implements ref.Val.ConvertToType. -func (t *TypeValue) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case TypeType: - return TypeType - case StringType: - return String(t.TypeName()) - } - return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) -} - -// Equal implements ref.Val.Equal. -func (t *TypeValue) Equal(other ref.Val) ref.Val { - if TypeType != other.Type() { - return ValOrErr(other, "no such overload") - } - return Bool(t.TypeName() == other.(ref.Type).TypeName()) -} - -// HasTrait indicates whether the type supports the given trait. -// Trait codes are defined in the traits package, e.g. see traits.AdderType. -func (t *TypeValue) HasTrait(trait int) bool { - return trait&t.traitMask == trait -} - -// String implements fmt.Stringer. -func (t *TypeValue) String() string { - return t.name -} - -// Type implements ref.Val.Type. -func (t *TypeValue) Type() ref.Type { - return TypeType -} - -// TypeName gives the type's name as a string. -func (t *TypeValue) TypeName() string { - return t.name -} - -// Value implements ref.Val.Value. -func (t *TypeValue) Value() interface{} { - return t.name -} diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go deleted file mode 100644 index a313d91714..0000000000 --- a/vendor/github.com/google/cel-go/common/types/uint.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "math" - "reflect" - "strconv" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// Uint type implementation which supports comparison and math operators. -type Uint uint64 - -var ( - // UintType singleton. 
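NewTypeValue and HasTrait, deleted above, pair a type name with a bitmask built from the trait constants in the traits package. A short sketch with a hypothetical type name:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

func main() {
	// A hypothetical type that supports '+' and size().
	myType := types.NewTypeValue("example.Bag", traits.AdderType, traits.SizerType)

	fmt.Println(myType.HasTrait(traits.AdderType))    // true
	fmt.Println(myType.HasTrait(traits.ComparerType)) // false
	fmt.Println(myType.TypeName())                    // example.Bag
}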
- UintType = NewTypeValue("uint", - traits.AdderType, - traits.ComparerType, - traits.DividerType, - traits.ModderType, - traits.MultiplierType, - traits.SubtractorType) - - uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{}) - - uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{}) -) - -// Uint constants -const ( - uintZero = Uint(0) -) - -// Add implements traits.Adder.Add. -func (i Uint) Add(other ref.Val) ref.Val { - otherUint, ok := other.(Uint) - if !ok { - return ValOrErr(other, "no such overload") - } - if otherUint > 0 && i > math.MaxUint64-otherUint { - return NewErr("unsigned integer overflow") - } - return i + otherUint -} - -// Compare implements traits.Comparer.Compare. -func (i Uint) Compare(other ref.Val) ref.Val { - otherUint, ok := other.(Uint) - if !ok { - return ValOrErr(other, "no such overload") - } - if i < otherUint { - return IntNegOne - } - if i > otherUint { - return IntOne - } - return IntZero -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (i Uint) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - switch typeDesc.Kind() { - case reflect.Uint, reflect.Uint32, reflect.Uint64: - return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil - case reflect.Ptr: - switch typeDesc { - case anyValueType: - // Primitives must be wrapped before being set on an Any field. - return anypb.New(wrapperspb.UInt64(uint64(i))) - case jsonValueType: - // JSON can accurately represent 32-bit uints as floating point values. - if i.isJSONSafe() { - return structpb.NewNumberValue(float64(i)), nil - } - // Proto3 to JSON conversion requires string-formatted uint64 values - // since the conversion to floating point would result in truncation. - return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil - case uint32WrapperType: - // Convert the value to a wrapperspb.UInt32Value (with truncation). - return wrapperspb.UInt32(uint32(i)), nil - case uint64WrapperType: - // Convert the value to a wrapperspb.UInt64Value. - return wrapperspb.UInt64(uint64(i)), nil - } - switch typeDesc.Elem().Kind() { - case reflect.Uint32: - v := uint32(i) - p := reflect.New(typeDesc.Elem()) - p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) - return p.Interface(), nil - case reflect.Uint64: - v := uint64(i) - p := reflect.New(typeDesc.Elem()) - p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) - return p.Interface(), nil - } - case reflect.Interface: - iv := i.Value() - if reflect.TypeOf(iv).Implements(typeDesc) { - return iv, nil - } - if reflect.TypeOf(i).Implements(typeDesc) { - return i, nil - } - } - return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc) -} - -// ConvertToType implements ref.Val.ConvertToType. -func (i Uint) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case IntType: - if i > math.MaxInt64 { - return NewErr("range error converting %d to int", i) - } - return Int(i) - case UintType: - return i - case DoubleType: - return Double(i) - case StringType: - return String(fmt.Sprintf("%d", uint64(i))) - case TypeType: - return UintType - } - return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal) -} - -// Divide implements traits.Divider.Divide. -func (i Uint) Divide(other ref.Val) ref.Val { - otherUint, ok := other.(Uint) - if !ok { - return ValOrErr(other, "no such overload") - } - if otherUint == uintZero { - return NewErr("divide by zero") - } - return i / otherUint -} - -// Equal implements ref.Val.Equal. 
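The Uint operators deleted in this hunk guard against overflow, division by zero, and out-of-range conversions by returning error values rather than panicking. For example:

package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types"
)

func main() {
	a := types.Uint(math.MaxUint64)
	fmt.Println(types.IsError(a.Add(types.Uint(1))))                // true: unsigned integer overflow
	fmt.Println(types.IsError(types.Uint(7).Divide(types.Uint(0)))) // true: divide by zero

	// Values above math.MaxInt64 cannot be converted to the signed int type.
	fmt.Println(types.IsError(a.ConvertToType(types.IntType)))   // true: range error
	fmt.Println(types.Uint(42).ConvertToType(types.StringType)) // 42
}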
-func (i Uint) Equal(other ref.Val) ref.Val { - otherUint, ok := other.(Uint) - if !ok { - return ValOrErr(other, "no such overload") - } - return Bool(i == otherUint) -} - -// Modulo implements traits.Modder.Modulo. -func (i Uint) Modulo(other ref.Val) ref.Val { - otherUint, ok := other.(Uint) - if !ok { - return ValOrErr(other, "no such overload") - } - if otherUint == uintZero { - return NewErr("modulus by zero") - } - return i % otherUint -} - -// Multiply implements traits.Multiplier.Multiply. -func (i Uint) Multiply(other ref.Val) ref.Val { - otherUint, ok := other.(Uint) - if !ok { - return ValOrErr(other, "no such overload") - } - if otherUint != 0 && i > math.MaxUint64/otherUint { - return NewErr("unsigned integer overflow") - } - return i * otherUint -} - -// Subtract implements traits.Subtractor.Subtract. -func (i Uint) Subtract(subtrahend ref.Val) ref.Val { - subtraUint, ok := subtrahend.(Uint) - if !ok { - return ValOrErr(subtrahend, "no such overload") - } - if subtraUint > i { - return NewErr("unsigned integer overflow") - } - return i - subtraUint -} - -// Type implements ref.Val.Type. -func (i Uint) Type() ref.Type { - return UintType -} - -// Value implements ref.Val.Value. -func (i Uint) Value() interface{} { - return uint64(i) -} - -// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON. -func (i Uint) isJSONSafe() bool { - return i <= maxIntJSON -} diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go deleted file mode 100644 index a950920d60..0000000000 --- a/vendor/github.com/google/cel-go/common/types/unknown.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - - "github.com/google/cel-go/common/types/ref" -) - -// Unknown type implementation which collects expression ids which caused the -// current value to become unknown. -type Unknown []int64 - -var ( - // UnknownType singleton. - UnknownType = NewTypeValue("unknown") -) - -// ConvertToNative implements ref.Val.ConvertToNative. -func (u Unknown) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { - return u.Value(), nil -} - -// ConvertToType implements ref.Val.ConvertToType. -func (u Unknown) ConvertToType(typeVal ref.Type) ref.Val { - return u -} - -// Equal implements ref.Val.Equal. -func (u Unknown) Equal(other ref.Val) ref.Val { - return u -} - -// Type implements ref.Val.Type. -func (u Unknown) Type() ref.Type { - return UnknownType -} - -// Value implements ref.Val.Value. -func (u Unknown) Value() interface{} { - return []int64(u) -} - -// IsUnknown returns whether the element ref.Type or ref.Val is equal to the -// UnknownType singleton. 
-func IsUnknown(val ref.Val) bool { - return val.Type() == UnknownType -} diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go deleted file mode 100644 index 761c512d73..0000000000 --- a/vendor/github.com/google/cel-go/common/types/util.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknonwType. -func IsUnknownOrError(val ref.Val) bool { - switch val.Type() { - case UnknownType, ErrType: - return true - } - return false -} - -// IsPrimitiveType returns whether the input element ref.Val is a primitive type. -// Note, primitive types do not include well-known types such as Duration and Timestamp. -func IsPrimitiveType(val ref.Val) bool { - switch val.Type() { - case BoolType, BytesType, DoubleType, IntType, StringType, UintType: - return true - } - return false -} diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel deleted file mode 100644 index 35854f112a..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel +++ /dev/null @@ -1,70 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "activation.go", - "attributes.go", - "attribute_patterns.go", - "coster.go", - "decorators.go", - "dispatcher.go", - "evalstate.go", - "interpretable.go", - "interpreter.go", - "planner.go", - "prune.go", - ], - importpath = "github.com/google/cel-go/interpreter", - deps = [ - "//common:go_default_library", - "//common/operators:go_default_library", - "//common/overloads:go_default_library", - "//common/containers:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", - "//interpreter/functions:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/known/durationpb:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", - "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ] -) - -go_test( - name = "go_default_test", - srcs = [ - "activation_test.go", - "attributes_test.go", - "attribute_patterns_test.go", - "interpreter_test.go", - "prune_test.go", - ], - embed = [ - ":go_default_library", - ], - deps = [ - "//checker:go_default_library", - "//checker/decls:go_default_library", - "//common/debug:go_default_library", - "//common/operators:go_default_library", - 
"//common/containers:go_default_library", - "//common/types:go_default_library", - "//interpreter/functions:go_default_library", - "//parser:go_default_library", - "//test:go_default_library", - "//test/proto2pb:go_default_library", - "//test/proto3pb:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/known/anypb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go deleted file mode 100644 index 4f13151997..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/activation.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "errors" - "fmt" - "sync" - - "github.com/google/cel-go/common/types/ref" -) - -// Activation used to resolve identifiers by name and references by id. -// -// An Activation is the primary mechanism by which a caller supplies input into a CEL program. -type Activation interface { - // ResolveName returns a value from the activation by qualified name, or false if the name - // could not be found. - ResolveName(name string) (interface{}, bool) - - // Parent returns the parent of the current activation, may be nil. - // If non-nil, the parent will be searched during resolve calls. - Parent() Activation -} - -// EmptyActivation returns a variable free activation. -func EmptyActivation() Activation { - // This call cannot fail. - a, _ := NewActivation(map[string]interface{}{}) - return a -} - -// NewActivation returns an activation based on a map-based binding where the map keys are -// expected to be qualified names used with ResolveName calls. -// -// The input `bindings` may either be of type `Activation` or `map[string]interface{}`. -// -// Lazy bindings may be supplied within the map-based input in either of the following forms: -// - func() interface{} -// - func() ref.Val -// -// The output of the lazy binding will overwrite the variable reference in the internal map. -// -// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using -// the ref.TypeAdapter configured in the environment. -func NewActivation(bindings interface{}) (Activation, error) { - if bindings == nil { - return nil, errors.New("bindings must be non-nil") - } - a, isActivation := bindings.(Activation) - if isActivation { - return a, nil - } - m, isMap := bindings.(map[string]interface{}) - if !isMap { - return nil, fmt.Errorf( - "activation input must be an activation or map[string]interface: got %T", - bindings) - } - return &mapActivation{bindings: m}, nil -} - -// mapActivation which implements Activation and maps of named values. -// -// Named bindings may lazily supply values by providing a function which accepts no arguments and -// produces an interface value. 
-type mapActivation struct { - bindings map[string]interface{} -} - -// Parent implements the Activation interface method. -func (a *mapActivation) Parent() Activation { - return nil -} - -// ResolveName implements the Activation interface method. -func (a *mapActivation) ResolveName(name string) (interface{}, bool) { - obj, found := a.bindings[name] - if !found { - return nil, false - } - fn, isLazy := obj.(func() ref.Val) - if isLazy { - obj = fn() - a.bindings[name] = obj - } - fnRaw, isLazy := obj.(func() interface{}) - if isLazy { - obj = fnRaw() - a.bindings[name] = obj - } - return obj, found -} - -// hierarchicalActivation which implements Activation and contains a parent and -// child activation. -type hierarchicalActivation struct { - parent Activation - child Activation -} - -// Parent implements the Activation interface method. -func (a *hierarchicalActivation) Parent() Activation { - return a.parent -} - -// ResolveName implements the Activation interface method. -func (a *hierarchicalActivation) ResolveName(name string) (interface{}, bool) { - if object, found := a.child.ResolveName(name); found { - return object, found - } - return a.parent.ResolveName(name) -} - -// NewHierarchicalActivation takes two activations and produces a new one which prioritizes -// resolution in the child first and parent(s) second. -func NewHierarchicalActivation(parent Activation, child Activation) Activation { - return &hierarchicalActivation{parent, child} -} - -// NewPartialActivation returns an Activation which contains a list of AttributePattern values -// representing field and index operations that should result in a 'types.Unknown' result. -// -// The `bindings` value may be any value type supported by the interpreter.NewActivation call, -// but is typically either an existing Activation or map[string]interface{}. -func NewPartialActivation(bindings interface{}, - unknowns ...*AttributePattern) (PartialActivation, error) { - a, err := NewActivation(bindings) - if err != nil { - return nil, err - } - return &partActivation{Activation: a, unknowns: unknowns}, nil -} - -// PartialActivation extends the Activation interface with a set of UnknownAttributePatterns. -type PartialActivation interface { - Activation - - // UnknownAttributePaths returns a set of AttributePattern values which match Attribute - // expressions for data accesses whose values are not yet known. - UnknownAttributePatterns() []*AttributePattern -} - -// partActivation is the default implementations of the PartialActivation interface. -type partActivation struct { - Activation - unknowns []*AttributePattern -} - -// UnknownAttributePatterns implements the PartialActivation interface method. -func (a *partActivation) UnknownAttributePatterns() []*AttributePattern { - return a.unknowns -} - -// varActivation represents a single mutable variable binding. -// -// This activation type should only be used within folds as the fold loop controls the object -// life-cycle. -type varActivation struct { - parent Activation - name string - val ref.Val -} - -// Parent implements the Activation interface method. -func (v *varActivation) Parent() Activation { - return v.parent -} - -// ResolveName implements the Activation interface method. -func (v *varActivation) ResolveName(name string) (interface{}, bool) { - if name == v.name { - return v.val, true - } - return v.parent.ResolveName(name) -} - -var ( - // pool of var activations to reduce allocations during folds. 
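NewHierarchicalActivation, also deleted above, layers a child activation over a parent so that child bindings shadow the parent's. For example:

package main

import (
	"fmt"

	"github.com/google/cel-go/interpreter"
)

func main() {
	parent, _ := interpreter.NewActivation(map[string]interface{}{
		"env":     "prod",
		"cluster": "openshift",
	})
	child, _ := interpreter.NewActivation(map[string]interface{}{
		"env": "staging", // shadows the parent binding
	})

	vars := interpreter.NewHierarchicalActivation(parent, child)
	fmt.Println(vars.ResolveName("env"))     // staging true
	fmt.Println(vars.ResolveName("cluster")) // openshift true
}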
- varActivationPool = &sync.Pool{ - New: func() interface{} { - return &varActivation{} - }, - } -) diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go deleted file mode 100644 index 3633255d23..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "fmt" - - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" -) - -// AttributePattern represents a top-level variable with an optional set of qualifier patterns. -// -// When using a CEL expression within a container, e.g. a package or namespace, the variable name -// in the pattern must match the qualified name produced during the variable namespace resolution. -// For example, if variable `c` appears in an expression whose container is `a.b`, the variable -// name supplied to the pattern must be `a.b.c` -// -// The qualifier patterns for attribute matching must be one of the following: -// -// - valid map key type: string, int, uint, bool -// - wildcard (*) -// -// Examples: -// -// 1. ns.myvar["complex-value"] -// 2. ns.myvar["complex-value"][0] -// 3. ns.myvar["complex-value"].*.name -// -// The first example is simple: match an attribute where the variable is 'ns.myvar' with a -// field access on 'complex-value'. The second example expands the match to indicate that only -// a specific index `0` should match. And lastly, the third example matches any indexed access -// that later selects the 'name' field. -type AttributePattern struct { - variable string - qualifierPatterns []*AttributeQualifierPattern -} - -// NewAttributePattern produces a new mutable AttributePattern based on a variable name. -func NewAttributePattern(variable string) *AttributePattern { - return &AttributePattern{ - variable: variable, - qualifierPatterns: []*AttributeQualifierPattern{}, - } -} - -// QualString adds a string qualifier pattern to the AttributePattern. The string may be a valid -// identifier, or string map key including empty string. -func (apat *AttributePattern) QualString(pattern string) *AttributePattern { - apat.qualifierPatterns = append(apat.qualifierPatterns, - &AttributeQualifierPattern{value: pattern}) - return apat -} - -// QualInt adds an int qualifier pattern to the AttributePattern. The index may be either a map or -// list index. -func (apat *AttributePattern) QualInt(pattern int64) *AttributePattern { - apat.qualifierPatterns = append(apat.qualifierPatterns, - &AttributeQualifierPattern{value: pattern}) - return apat -} - -// QualUint adds an uint qualifier pattern for a map index operation to the AttributePattern. 
-func (apat *AttributePattern) QualUint(pattern uint64) *AttributePattern { - apat.qualifierPatterns = append(apat.qualifierPatterns, - &AttributeQualifierPattern{value: pattern}) - return apat -} - -// QualBool adds a bool qualifier pattern for a map index operation to the AttributePattern. -func (apat *AttributePattern) QualBool(pattern bool) *AttributePattern { - apat.qualifierPatterns = append(apat.qualifierPatterns, - &AttributeQualifierPattern{value: pattern}) - return apat -} - -// Wildcard adds a special sentinel qualifier pattern that will match any single qualifier. -func (apat *AttributePattern) Wildcard() *AttributePattern { - apat.qualifierPatterns = append(apat.qualifierPatterns, - &AttributeQualifierPattern{wildcard: true}) - return apat -} - -// VariableMatches returns true if the fully qualified variable matches the AttributePattern -// fully qualified variable name. -func (apat *AttributePattern) VariableMatches(variable string) bool { - return apat.variable == variable -} - -// QualifierPatterns returns the set of AttributeQualifierPattern values on the AttributePattern. -func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern { - return apat.qualifierPatterns -} - -// AttributeQualifierPattern holds a wilcard or valued qualifier pattern. -type AttributeQualifierPattern struct { - wildcard bool - value interface{} -} - -// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the -// qualifierValueEquator interface and its IsValueEqualTo returns true for the qualifier pattern. -func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool { - if qpat.wildcard { - return true - } - qve, ok := q.(qualifierValueEquator) - return ok && qve.QualifierValueEquals(qpat.value) -} - -// qualifierValueEquator defines an interface for determining if an input value, of valid map key -// type, is equal to the value held in the Qualifier. This interface is used by the -// AttributeQualifierPattern to determine pattern matches for non-wildcard qualifier patterns. -// -// Note: Attribute values are also Qualifier values; however, Attriutes are resolved before -// qualification happens. This is an implementation detail, but one relevant to why the Attribute -// types do not surface in the list of implementations. -// -// See: partialAttributeFactory.matchesUnknownPatterns for more details on how this interface is -// used. -type qualifierValueEquator interface { - // QualifierValueEquals returns true if the input value is equal to the value held in the - // Qualifier. - QualifierValueEquals(value interface{}) bool -} - -// QualifierValueEquals implementation for boolean qualifiers. -func (q *boolQualifier) QualifierValueEquals(value interface{}) bool { - bval, ok := value.(bool) - return ok && q.value == bval -} - -// QualifierValueEquals implementation for field qualifiers. -func (q *fieldQualifier) QualifierValueEquals(value interface{}) bool { - sval, ok := value.(string) - return ok && q.Name == sval -} - -// QualifierValueEquals implementation for string qualifiers. -func (q *stringQualifier) QualifierValueEquals(value interface{}) bool { - sval, ok := value.(string) - return ok && q.value == sval -} - -// QualifierValueEquals implementation for int qualifiers. -func (q *intQualifier) QualifierValueEquals(value interface{}) bool { - ival, ok := value.(int64) - return ok && q.value == ival -} - -// QualifierValueEquals implementation for uint qualifiers. 
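The AttributePattern builder deleted in this hunk mirrors the examples in its doc comment: each Qual* call appends one qualifier pattern and Wildcard matches any single qualifier. Combined with NewPartialActivation from the activation.go hunk above, these patterns mark data as unknown for partial evaluation:

package main

import (
	"fmt"

	"github.com/google/cel-go/interpreter"
)

func main() {
	// ns.myvar["complex-value"].*.name — per the doc comment examples above.
	pat := interpreter.NewAttributePattern("ns.myvar").
		QualString("complex-value").
		Wildcard().
		QualString("name")

	fmt.Println(pat.VariableMatches("ns.myvar")) // true
	fmt.Println(len(pat.QualifierPatterns()))    // 3

	// Declare an entire variable unknown for a partial evaluation.
	vars, err := interpreter.NewPartialActivation(
		map[string]interface{}{},
		interpreter.NewAttributePattern("ns.other").Wildcard(),
	)
	fmt.Println(err == nil, len(vars.UnknownAttributePatterns())) // true 1
}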
-func (q *uintQualifier) QualifierValueEquals(value interface{}) bool { - uval, ok := value.(uint64) - return ok && q.value == uval -} - -// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing -// AttributePattern matches with PartialActivation inputs. -func NewPartialAttributeFactory(container *containers.Container, - adapter ref.TypeAdapter, - provider ref.TypeProvider) AttributeFactory { - fac := NewAttributeFactory(container, adapter, provider) - return &partialAttributeFactory{ - AttributeFactory: fac, - container: container, - adapter: adapter, - provider: provider, - } -} - -type partialAttributeFactory struct { - AttributeFactory - container *containers.Container - adapter ref.TypeAdapter - provider ref.TypeProvider -} - -// AbsoluteAttribute implementation of the AttributeFactory interface which wraps the -// NamespacedAttribute resolution in an internal attributeMatcher object to dynamically match -// unknown patterns from PartialActivation inputs if given. -func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { - attr := fac.AttributeFactory.AbsoluteAttribute(id, names...) - return &attributeMatcher{fac: fac, NamespacedAttribute: attr} -} - -// MaybeAttribute implementation of the AttributeFactory interface which ensure that the set of -// 'maybe' NamespacedAttribute values are produced using the PartialAttributeFactory rather than -// the base AttributeFactory implementation. -func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute { - return &maybeAttribute{ - id: id, - attrs: []NamespacedAttribute{ - fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...), - }, - adapter: fac.adapter, - provider: fac.provider, - fac: fac, - } -} - -// matchesUnknownPatterns returns true if the variable names and qualifiers for a given -// Attribute value match any of the ActivationPattern objects in the set of unknown activation -// patterns on the given PartialActivation. -// -// For example, in the expression `a.b`, the Attribute is composed of variable `a`, with string -// qualifier `b`. When a PartialActivation is supplied, it indicates that some or all of the data -// provided in the input is unknown by specifying unknown AttributePatterns. An AttributePattern -// that refers to variable `a` with a string qualifier of `c` will not match `a.b`; however, any -// of the following patterns will match Attribute `a.b`: -// -// - `AttributePattern("a")` -// - `AttributePattern("a").Wildcard()` -// - `AttributePattern("a").QualString("b")` -// - `AttributePattern("a").QualString("b").QualInt(0)` -// -// Any AttributePattern which overlaps an Attribute or vice-versa will produce an Unknown result -// for the last pattern matched variable or qualifier in the Attribute. In the first matching -// example, the expression id representing variable `a` would be listed in the Unknown result, -// whereas in the other pattern examples, the qualifier `b` would be returned as the Unknown. 
-func (fac *partialAttributeFactory) matchesUnknownPatterns( - vars PartialActivation, - attrID int64, - variableNames []string, - qualifiers []Qualifier) (types.Unknown, error) { - patterns := vars.UnknownAttributePatterns() - candidateIndices := map[int]struct{}{} - for _, variable := range variableNames { - for i, pat := range patterns { - if pat.VariableMatches(variable) { - candidateIndices[i] = struct{}{} - } - } - } - // Determine whether to return early if there are no candidate unknown patterns. - if len(candidateIndices) == 0 { - return nil, nil - } - // Determine whether to return early if there are no qualifiers. - if len(qualifiers) == 0 { - return types.Unknown{attrID}, nil - } - // Resolve the attribute qualifiers into a static set. This prevents more dynamic - // Attribute resolutions than necessary when there are multiple unknown patterns - // that traverse the same Attribute-based qualifier field. - newQuals := make([]Qualifier, len(qualifiers)) - for i, qual := range qualifiers { - attr, isAttr := qual.(Attribute) - if isAttr { - val, err := attr.Resolve(vars) - if err != nil { - return nil, err - } - unk, isUnk := val.(types.Unknown) - if isUnk { - return unk, nil - } - // If this resolution behavior ever changes, new implementations of the - // qualifierValueEquator may be required to handle proper resolution. - qual, err = fac.NewQualifier(nil, qual.ID(), val) - if err != nil { - return nil, err - } - } - newQuals[i] = qual - } - // Determine whether any of the unknown patterns match. - for patIdx := range candidateIndices { - pat := patterns[patIdx] - isUnk := true - matchExprID := attrID - qualPats := pat.QualifierPatterns() - for i, qual := range newQuals { - if i >= len(qualPats) { - break - } - matchExprID = qual.ID() - qualPat := qualPats[i] - // Note, the AttributeQualifierPattern relies on the input Qualifier not being an - // Attribute, since there is no way to resolve the Attribute with the information - // provided to the Matches call. - if !qualPat.Matches(qual) { - isUnk = false - break - } - } - if isUnk { - return types.Unknown{matchExprID}, nil - } - } - return nil, nil -} - -// attributeMatcher embeds the NamespacedAttribute interface which allows it to participate in -// AttributePattern matching against Attribute values without having to modify the code paths that -// identify Attributes in expressions. -type attributeMatcher struct { - NamespacedAttribute - qualifiers []Qualifier - fac *partialAttributeFactory -} - -// AddQualifier implements the Attribute interface method. -func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) { - // Add the qualifier to the embedded NamespacedAttribute. If the input to the Resolve - // method is not a PartialActivation, or does not match an unknown attribute pattern, the - // Resolve method is directly invoked on the underlying NamespacedAttribute. - _, err := m.NamespacedAttribute.AddQualifier(qual) - if err != nil { - return nil, err - } - // The attributeMatcher overloads TryResolve and will attempt to match unknown patterns against - // the variable name and qualifier set contained within the Attribute. These values are not - // directly inspectable on the top-level NamespacedAttribute interface and so are tracked within - // the attributeMatcher. 
- m.qualifiers = append(m.qualifiers, qual) - return m, nil -} - -// Resolve is an implementation of the Attribute interface method which uses the -// attributeMatcher TryResolve implementation rather than the embedded NamespacedAttribute -// Resolve implementation. -func (m *attributeMatcher) Resolve(vars Activation) (interface{}, error) { - obj, found, err := m.TryResolve(vars) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("no such attribute: %v", m.NamespacedAttribute) - } - return obj, nil -} - -// TryResolve is an implementation of the NamespacedAttribute interface method which tests -// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise, -// the standard Resolve logic applies. -func (m *attributeMatcher) TryResolve(vars Activation) (interface{}, bool, error) { - id := m.NamespacedAttribute.ID() - partial, isPartial := vars.(PartialActivation) - if isPartial { - unk, err := m.fac.matchesUnknownPatterns( - partial, - id, - m.CandidateVariableNames(), - m.qualifiers) - if err != nil { - return nil, true, err - } - if unk != nil { - return unk, true, nil - } - } - return m.NamespacedAttribute.TryResolve(vars) -} - -// Qualify is an implementation of the Qualifier interface method. -func (m *attributeMatcher) Qualify(vars Activation, obj interface{}) (interface{}, error) { - val, err := m.Resolve(vars) - if err != nil { - return nil, err - } - unk, isUnk := val.(types.Unknown) - if isUnk { - return unk, nil - } - qual, err := m.fac.NewQualifier(nil, m.ID(), val) - if err != nil { - return nil, err - } - return qual.Qualify(vars, obj) -} diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go deleted file mode 100644 index e101c669ba..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ /dev/null @@ -1,1032 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "errors" - "fmt" - "math" - - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// AttributeFactory provides methods creating Attribute and Qualifier values. -type AttributeFactory interface { - // AbsoluteAttribute creates an attribute that refers to a top-level variable name. - // - // Checked expressions generate absolute attribute with a single name. - // Parse-only expressions may have more than one possible absolute identifier when the - // expression is created within a container, e.g. package or namespace. - // - // When there is more than one name supplied to the AbsoluteAttribute call, the names - // must be in CEL's namespace resolution order. 
The name arguments provided here are - // returned in the same order as they were provided by the NamespacedAttribute - // CandidateVariableNames method. - AbsoluteAttribute(id int64, names ...string) NamespacedAttribute - - // ConditionalAttribute creates an attribute with two Attribute branches, where the Attribute - // that is resolved depends on the boolean evaluation of the input 'expr'. - ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute - - // MaybeAttribute creates an attribute that refers to either a field selection or a namespaced - // variable name. - // - // Only expressions which have not been type-checked may generate oneof attributes. - MaybeAttribute(id int64, name string) Attribute - - // RelativeAttribute creates an attribute whose value is a qualification of a dynamic - // computation rather than a static variable reference. - RelativeAttribute(id int64, operand Interpretable) Attribute - - // NewQualifier creates a qualifier on the target object with a given value. - // - // The 'val' may be an Attribute or any proto-supported map key type: bool, int, string, uint. - // - // The qualifier may consider the object type being qualified, if present. If absent, the - // qualification should be considered dynamic and the qualification should still work, though - // it may be sub-optimal. - NewQualifier(objType *exprpb.Type, qualID int64, val interface{}) (Qualifier, error) -} - -// Qualifier marker interface for designating different qualifier values and where they appear -// within field selections and index call expressions (`_[_]`). -type Qualifier interface { - // ID where the qualifier appears within an expression. - ID() int64 - - // Qualify performs a qualification, e.g. field selection, on the input object and returns - // the value or error that results. - Qualify(vars Activation, obj interface{}) (interface{}, error) -} - -// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the -// qualifier's constant value. -// -// Non-constant qualifiers are of Attribute type. -type ConstantQualifier interface { - Qualifier - - Value() ref.Val -} - -// Attribute values are a variable or value with an optional set of qualifiers, such as field, key, -// or index accesses. -type Attribute interface { - Qualifier - - // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid - // qualifier type. - AddQualifier(Qualifier) (Attribute, error) - - // Resolve returns the value of the Attribute given the current Activation. - Resolve(Activation) (interface{}, error) -} - -// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers -// such as field, key, or index accesses. -type NamespacedAttribute interface { - Attribute - - // CandidateVariableNames returns the possible namespaced variable names for this Attribute in - // the CEL namespace resolution order. - CandidateVariableNames() []string - - // Qualifiers returns the list of qualifiers associated with the Attribute.s - Qualifiers() []Qualifier - - // TryResolve attempts to return the value of the attribute given the current Activation. - // If an error is encountered during attribute resolution, it will be returned immediately. - // If the attribute cannot be resolved within the Activation, the result must be: `nil`, - // `false`, `nil`. 
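AbsoluteAttribute and CandidateVariableNames both depend on CEL's namespace resolution order: the most specific container-qualified name is tried first, then each enclosing scope, ending with the bare identifier. A minimal sketch of that ordering for a plain container (no aliases); resolveCandidateNames here is an illustrative stand-in for what containers.Container.ResolveCandidateNames is documented to produce, not the real implementation.

package main

import (
	"fmt"
	"strings"
)

// resolveCandidateNames lists candidate identifiers from most to least
// specific for a dotted container name. A leading dot means the identifier
// is absolute and only the root-scope name applies.
func resolveCandidateNames(container, name string) []string {
	if strings.HasPrefix(name, ".") {
		return []string{name[1:]}
	}
	candidates := []string{}
	for container != "" {
		candidates = append(candidates, container+"."+name)
		if i := strings.LastIndex(container, "."); i >= 0 {
			container = container[:i]
		} else {
			container = ""
		}
	}
	return append(candidates, name)
}

func main() {
	// In container a.b, identifier `c` yields: [a.b.c a.c c]
	fmt.Println(resolveCandidateNames("a.b", "c"))
}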
- TryResolve(Activation) (interface{}, bool, error) -} - -// NewAttributeFactory returns a default AttributeFactory which is produces Attribute values -// capable of resolving types by simple names and qualify the values using the supported qualifier -// types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, - a ref.TypeAdapter, - p ref.TypeProvider) AttributeFactory { - return &attrFactory{ - container: cont, - adapter: a, - provider: p, - } -} - -type attrFactory struct { - container *containers.Container - adapter ref.TypeAdapter - provider ref.TypeProvider -} - -// AbsoluteAttribute refers to a variable value and an optional qualifier path. -// -// The namespaceNames represent the names the variable could have based on namespace -// resolution rules. -func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { - return &absoluteAttribute{ - id: id, - namespaceNames: names, - qualifiers: []Qualifier{}, - adapter: r.adapter, - provider: r.provider, - fac: r, - } -} - -// ConditionalAttribute supports the case where an attribute selection may occur on a conditional -// expression, e.g. (cond ? a : b).c -func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute { - return &conditionalAttribute{ - id: id, - expr: expr, - truthy: t, - falsy: f, - adapter: r.adapter, - fac: r, - } -} - -// MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be -// direct variable accesses or some combination of variable access with qualification. -func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute { - return &maybeAttribute{ - id: id, - attrs: []NamespacedAttribute{ - r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...), - }, - adapter: r.adapter, - provider: r.provider, - fac: r, - } -} - -// RelativeAttribute refers to an expression and an optional qualifier path. -func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute { - return &relativeAttribute{ - id: id, - operand: operand, - qualifiers: []Qualifier{}, - adapter: r.adapter, - fac: r, - } -} - -// NewQualifier is an implementation of the AttributeFactory interface. -func (r *attrFactory) NewQualifier(objType *exprpb.Type, - qualID int64, - val interface{}) (Qualifier, error) { - // Before creating a new qualifier check to see if this is a protobuf message field access. - // If so, use the precomputed GetFrom qualification method rather than the standard - // stringQualifier. - str, isStr := val.(string) - if isStr && objType != nil && objType.GetMessageType() != "" { - ft, found := r.provider.FindFieldType(objType.GetMessageType(), str) - if found && ft.IsSet != nil && ft.GetFrom != nil { - return &fieldQualifier{ - id: qualID, - Name: str, - FieldType: ft, - adapter: r.adapter, - }, nil - } - } - return newQualifier(r.adapter, qualID, val) -} - -type absoluteAttribute struct { - id int64 - // namespaceNames represent the names the variable could have based on declared container - // (package) of the expression. - namespaceNames []string - qualifiers []Qualifier - adapter ref.TypeAdapter - provider ref.TypeProvider - fac AttributeFactory -} - -// ID implements the Attribute interface method. -func (a *absoluteAttribute) ID() int64 { - return a.id -} - -// Cost implements the Coster interface method. 
-func (a *absoluteAttribute) Cost() (min, max int64) { - for _, q := range a.qualifiers { - minQ, maxQ := estimateCost(q) - min += minQ - max += maxQ - } - min++ // For object retrieval. - max++ - return -} - -// AddQualifier implements the Attribute interface method. -func (a *absoluteAttribute) AddQualifier(qual Qualifier) (Attribute, error) { - a.qualifiers = append(a.qualifiers, qual) - return a, nil -} - -// CandidateVariableNames implements the NamespaceAttribute interface method. -func (a *absoluteAttribute) CandidateVariableNames() []string { - return a.namespaceNames -} - -// Qualifiers returns the list of Qualifier instances associated with the namespaced attribute. -func (a *absoluteAttribute) Qualifiers() []Qualifier { - return a.qualifiers -} - -// Qualify is an implementation of the Qualifier interface method. -func (a *absoluteAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) { - val, err := a.Resolve(vars) - if err != nil { - return nil, err - } - unk, isUnk := val.(types.Unknown) - if isUnk { - return unk, nil - } - qual, err := a.fac.NewQualifier(nil, a.id, val) - if err != nil { - return nil, err - } - return qual.Qualify(vars, obj) -} - -// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute -// variable is not found, or if its Qualifiers cannot be applied successfully. -func (a *absoluteAttribute) Resolve(vars Activation) (interface{}, error) { - obj, found, err := a.TryResolve(vars) - if err != nil { - return nil, err - } - if found { - return obj, nil - } - return nil, fmt.Errorf("no such attribute: %v", a) -} - -// String implements the Stringer interface method. -func (a *absoluteAttribute) String() string { - return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames) -} - -// TryResolve iterates through the namespaced variable names until one is found within the -// Activation or TypeProvider. -// -// If the variable name cannot be found as an Activation variable or in the TypeProvider as -// a type, then the result is `nil`, `false`, `nil` per the interface requirement. -func (a *absoluteAttribute) TryResolve(vars Activation) (interface{}, bool, error) { - for _, nm := range a.namespaceNames { - // If the variable is found, process it. Otherwise, wait until the checks to - // determine whether the type is unknown before returning. - op, found := vars.ResolveName(nm) - if found { - var err error - for _, qual := range a.qualifiers { - op, err = qual.Qualify(vars, op) - if err != nil { - return nil, true, err - } - } - return op, true, nil - } - // Attempt to resolve the qualified type name if the name is not a variable identifier. - typ, found := a.provider.FindIdent(nm) - if found { - if len(a.qualifiers) == 0 { - return typ, true, nil - } - return nil, true, fmt.Errorf("no such attribute: %v", typ) - } - } - return nil, false, nil -} - -type conditionalAttribute struct { - id int64 - expr Interpretable - truthy Attribute - falsy Attribute - adapter ref.TypeAdapter - fac AttributeFactory -} - -// ID is an implementation of the Attribute interface method. -func (a *conditionalAttribute) ID() int64 { - return a.id -} - -// Cost provides the heuristic cost of a ternary operation ? : . -// The cost is computed as cost(expr) plus the min/max costs of evaluating either -// `t` or `f`. 
-func (a *conditionalAttribute) Cost() (min, max int64) { - tMin, tMax := estimateCost(a.truthy) - fMin, fMax := estimateCost(a.falsy) - eMin, eMax := estimateCost(a.expr) - return eMin + findMin(tMin, fMin), eMax + findMax(tMax, fMax) -} - -// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing -// the qualification of alternate attributes. -func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) { - _, err := a.truthy.AddQualifier(qual) - if err != nil { - return nil, err - } - _, err = a.falsy.AddQualifier(qual) - if err != nil { - return nil, err - } - return a, nil -} - -// Qualify is an implementation of the Qualifier interface method. -func (a *conditionalAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) { - val, err := a.Resolve(vars) - if err != nil { - return nil, err - } - unk, isUnk := val.(types.Unknown) - if isUnk { - return unk, nil - } - qual, err := a.fac.NewQualifier(nil, a.id, val) - if err != nil { - return nil, err - } - return qual.Qualify(vars, obj) -} - -// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly. -func (a *conditionalAttribute) Resolve(vars Activation) (interface{}, error) { - val := a.expr.Eval(vars) - if types.IsError(val) { - return nil, val.(*types.Err) - } - if val == types.True { - return a.truthy.Resolve(vars) - } - if val == types.False { - return a.falsy.Resolve(vars) - } - if types.IsUnknown(val) { - return val, nil - } - return nil, types.MaybeNoSuchOverloadErr(val).(*types.Err) -} - -// String is an implementation of the Stringer interface method. -func (a *conditionalAttribute) String() string { - return fmt.Sprintf("id: %v, truthy attribute: %v, falsy attribute: %v", a.id, a.truthy, a.falsy) -} - -type maybeAttribute struct { - id int64 - attrs []NamespacedAttribute - adapter ref.TypeAdapter - provider ref.TypeProvider - fac AttributeFactory -} - -// ID is an implementation of the Attribute interface method. -func (a *maybeAttribute) ID() int64 { - return a.id -} - -// Cost implements the Coster interface method. The min cost is computed as the minimal cost among -// all the possible attributes, the max cost ditto. -func (a *maybeAttribute) Cost() (min, max int64) { - min, max = math.MaxInt64, 0 - for _, a := range a.attrs { - minA, maxA := estimateCost(a) - min = findMin(min, minA) - max = findMax(max, maxA) - } - return -} - -func findMin(x, y int64) int64 { - if x < y { - return x - } - return y -} - -func findMax(x, y int64) int64 { - if x > y { - return x - } - return y -} - -// AddQualifier adds a qualifier to each possible attribute variant, and also creates -// a new namespaced variable from the qualified value. -// -// The algorithm for building the maybe attribute is as follows: -// -// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression -// -// mb = MaybeAttribute(, "a") -// -// Initializing the maybe attribute creates an absolute attribute internally which includes the -// possible namespaced names of the attribute. In this example, let's assume we are in namespace -// 'ns', then the maybe is either one of the following variable names: -// -// possible variables names -- ns.a, a -// -// 2. 
Adding a qualifier to the maybe means that the variable name could be a longer qualified -// name, or a field selection on one of the possible variable names produced earlier: -// -// mb.AddQualifier("b") -// -// possible variables names -- ns.a.b, a.b -// possible field selection -- ns.a['b'], a['b'] -// -// If none of the attributes within the maybe resolves a value, the result is an error. -func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) { - str := "" - isStr := false - cq, isConst := qual.(ConstantQualifier) - if isConst { - str, isStr = cq.Value().Value().(string) - } - var augmentedNames []string - // First add the qualifier to all existing attributes in the oneof. - for _, attr := range a.attrs { - if isStr && len(attr.Qualifiers()) == 0 { - candidateVars := attr.CandidateVariableNames() - augmentedNames = make([]string, len(candidateVars)) - for i, name := range candidateVars { - augmentedNames[i] = fmt.Sprintf("%s.%s", name, str) - } - } - _, err := attr.AddQualifier(qual) - if err != nil { - return nil, err - } - } - // Next, ensure the most specific variable / type reference is searched first. - a.attrs = append([]NamespacedAttribute{ - a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...), - }, a.attrs...) - return a, nil -} - -// Qualify is an implementation of the Qualifier interface method. -func (a *maybeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) { - val, err := a.Resolve(vars) - if err != nil { - return nil, err - } - unk, isUnk := val.(types.Unknown) - if isUnk { - return unk, nil - } - qual, err := a.fac.NewQualifier(nil, a.id, val) - if err != nil { - return nil, err - } - return qual.Qualify(vars, obj) -} - -// Resolve follows the variable resolution rules to determine whether the attribute is a variable -// or a field selection. -func (a *maybeAttribute) Resolve(vars Activation) (interface{}, error) { - for _, attr := range a.attrs { - obj, found, err := attr.TryResolve(vars) - // Return an error if one is encountered. - if err != nil { - return nil, err - } - // If the object was found, return it. - if found { - return obj, nil - } - } - // Else, produce a no such attribute error. - return nil, fmt.Errorf("no such attribute: %v", a) -} - -// String is an implementation of the Stringer interface method. -func (a *maybeAttribute) String() string { - return fmt.Sprintf("id: %v, attributes: %v", a.id, a.attrs) -} - -type relativeAttribute struct { - id int64 - operand Interpretable - qualifiers []Qualifier - adapter ref.TypeAdapter - fac AttributeFactory -} - -// ID is an implementation of the Attribute interface method. -func (a *relativeAttribute) ID() int64 { - return a.id -} - -// Cost implements the Coster interface method. -func (a *relativeAttribute) Cost() (min, max int64) { - min, max = estimateCost(a.operand) - for _, qual := range a.qualifiers { - minQ, maxQ := estimateCost(qual) - min += minQ - max += maxQ - } - return -} - -// AddQualifier implements the Attribute interface method. -func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) { - a.qualifiers = append(a.qualifiers, qual) - return a, nil -} - -// Qualify is an implementation of the Qualifier interface method. 
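Step 2 of the maybe-attribute algorithm above can be seen in isolation: adding a string qualifier turns each still-unqualified candidate variable name into a longer candidate that is searched first, while the original candidates remain as field selections. A tiny sketch with an illustrative augment helper (not part of cel-go).

package main

import "fmt"

// augment produces the longer, more specific candidate variable names that a
// string qualifier adds to a "maybe" attribute whose candidates carry no
// qualifiers yet.
func augment(candidates []string, qual string) []string {
	out := make([]string, len(candidates))
	for i, name := range candidates {
		out[i] = name + "." + qual
	}
	return out
}

func main() {
	// In namespace ns, `a` has candidates [ns.a a]; after AddQualifier("b")
	// the candidates [ns.a.b a.b] are searched before the field selections
	// ns.a['b'] and a['b'].
	fmt.Println(augment([]string{"ns.a", "a"}, "b"))
}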
-func (a *relativeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) { - val, err := a.Resolve(vars) - if err != nil { - return nil, err - } - unk, isUnk := val.(types.Unknown) - if isUnk { - return unk, nil - } - qual, err := a.fac.NewQualifier(nil, a.id, val) - if err != nil { - return nil, err - } - return qual.Qualify(vars, obj) -} - -// Resolve expression value and qualifier relative to the expression result. -func (a *relativeAttribute) Resolve(vars Activation) (interface{}, error) { - // First, evaluate the operand. - v := a.operand.Eval(vars) - if types.IsError(v) { - return nil, v.(*types.Err) - } - if types.IsUnknown(v) { - return v, nil - } - // Next, qualify it. Qualification handles unkonwns as well, so there's no need to recheck. - var err error - var obj interface{} = v - for _, qual := range a.qualifiers { - obj, err = qual.Qualify(vars, obj) - if err != nil { - return nil, err - } - } - return obj, nil -} - -// String is an implementation of the Stringer interface method. -func (a *relativeAttribute) String() string { - return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) -} - -func newQualifier(adapter ref.TypeAdapter, id int64, v interface{}) (Qualifier, error) { - var qual Qualifier - switch val := v.(type) { - case Attribute: - return &attrQualifier{id: id, Attribute: val}, nil - case string: - qual = &stringQualifier{id: id, value: val, celValue: types.String(val), adapter: adapter} - case int: - qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter} - case int32: - qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter} - case int64: - qual = &intQualifier{id: id, value: val, celValue: types.Int(val), adapter: adapter} - case uint: - qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter} - case uint32: - qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter} - case uint64: - qual = &uintQualifier{id: id, value: val, celValue: types.Uint(val), adapter: adapter} - case bool: - qual = &boolQualifier{id: id, value: val, celValue: types.Bool(val), adapter: adapter} - case types.String: - qual = &stringQualifier{id: id, value: string(val), celValue: val, adapter: adapter} - case types.Int: - qual = &intQualifier{id: id, value: int64(val), celValue: val, adapter: adapter} - case types.Uint: - qual = &uintQualifier{id: id, value: uint64(val), celValue: val, adapter: adapter} - case types.Bool: - qual = &boolQualifier{id: id, value: bool(val), celValue: val, adapter: adapter} - default: - return nil, fmt.Errorf("invalid qualifier type: %T", v) - } - return qual, nil -} - -type attrQualifier struct { - id int64 - Attribute -} - -func (q *attrQualifier) ID() int64 { - return q.id -} - -// Cost returns zero for constant field qualifiers -func (q *attrQualifier) Cost() (min, max int64) { - return estimateCost(q.Attribute) -} - -type stringQualifier struct { - id int64 - value string - celValue ref.Val - adapter ref.TypeAdapter -} - -// ID is an implementation of the Qualifier interface method. -func (q *stringQualifier) ID() int64 { - return q.id -} - -// Qualify implements the Qualifier interface method. 
-func (q *stringQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) { - s := q.value - isMap := false - isKey := false - switch o := obj.(type) { - case map[string]interface{}: - isMap = true - obj, isKey = o[s] - case map[string]string: - isMap = true - obj, isKey = o[s] - case map[string]int: - isMap = true - obj, isKey = o[s] - case map[string]int32: - isMap = true - obj, isKey = o[s] - case map[string]int64: - isMap = true - obj, isKey = o[s] - case map[string]uint: - isMap = true - obj, isKey = o[s] - case map[string]uint32: - isMap = true - obj, isKey = o[s] - case map[string]uint64: - isMap = true - obj, isKey = o[s] - case map[string]float32: - isMap = true - obj, isKey = o[s] - case map[string]float64: - isMap = true - obj, isKey = o[s] - case map[string]bool: - isMap = true - obj, isKey = o[s] - case types.Unknown: - return o, nil - default: - elem, err := refResolve(q.adapter, q.celValue, obj) - if err != nil { - return nil, err - } - if types.IsUnknown(elem) { - return elem, nil - } - return elem, nil - } - if isMap && !isKey { - return nil, fmt.Errorf("no such key: %v", s) - } - return obj, nil -} - -// Value implements the ConstantQualifier interface -func (q *stringQualifier) Value() ref.Val { - return q.celValue -} - -// Cost returns zero for constant field qualifiers -func (q *stringQualifier) Cost() (min, max int64) { - return 0, 0 -} - -type intQualifier struct { - id int64 - value int64 - celValue ref.Val - adapter ref.TypeAdapter -} - -// ID is an implementation of the Qualifier interface method. -func (q *intQualifier) ID() int64 { - return q.id -} - -// Qualify implements the Qualifier interface method. -func (q *intQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) { - i := q.value - isMap := false - isKey := false - isIndex := false - switch o := obj.(type) { - // The specialized map types supported by an int qualifier are considerably fewer than the set - // of specialized map types supported by string qualifiers since they are less frequently used - // than string-based map keys. Additional specializations may be added in the future if - // desired. 
- case map[int]interface{}: - isMap = true - obj, isKey = o[int(i)] - case map[int32]interface{}: - isMap = true - obj, isKey = o[int32(i)] - case map[int64]interface{}: - isMap = true - obj, isKey = o[i] - case []interface{}: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []string: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []int: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []int32: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []int64: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []uint: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []uint32: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []uint64: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []float32: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []float64: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case []bool: - isIndex = i >= 0 && i < int64(len(o)) - if isIndex { - obj = o[i] - } - case types.Unknown: - return o, nil - default: - elem, err := refResolve(q.adapter, q.celValue, obj) - if err != nil { - return nil, err - } - if types.IsUnknown(elem) { - return elem, nil - } - return elem, nil - } - if isMap && !isKey { - return nil, fmt.Errorf("no such key: %v", i) - } - if !isMap && !isIndex { - return nil, fmt.Errorf("index out of bounds: %v", i) - } - return obj, nil -} - -// Value implements the ConstantQualifier interface -func (q *intQualifier) Value() ref.Val { - return q.celValue -} - -// Cost returns zero for constant field qualifiers -func (q *intQualifier) Cost() (min, max int64) { - return 0, 0 -} - -type uintQualifier struct { - id int64 - value uint64 - celValue ref.Val - adapter ref.TypeAdapter -} - -// ID is an implementation of the Qualifier interface method. -func (q *uintQualifier) ID() int64 { - return q.id -} - -// Qualify implements the Qualifier interface method. -func (q *uintQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) { - u := q.value - isMap := false - isKey := false - switch o := obj.(type) { - // The specialized map types supported by a uint qualifier are considerably fewer than the set - // of specialized map types supported by string qualifiers since they are less frequently used - // than string-based map keys. Additional specializations may be added in the future if - // desired. - case map[uint]interface{}: - isMap = true - obj, isKey = o[uint(u)] - case map[uint32]interface{}: - isMap = true - obj, isKey = o[uint32(u)] - case map[uint64]interface{}: - isMap = true - obj, isKey = o[u] - case types.Unknown: - return o, nil - default: - elem, err := refResolve(q.adapter, q.celValue, obj) - if err != nil { - return nil, err - } - if types.IsUnknown(elem) { - return elem, nil - } - return elem, nil - } - if isMap && !isKey { - return nil, fmt.Errorf("no such key: %v", u) - } - return obj, nil -} - -// Value implements the ConstantQualifier interface -func (q *uintQualifier) Value() ref.Val { - return q.celValue -} - -// Cost returns zero for constant field qualifiers -func (q *uintQualifier) Cost() (min, max int64) { - return 0, 0 -} - -type boolQualifier struct { - id int64 - value bool - celValue ref.Val - adapter ref.TypeAdapter -} - -// ID is an implementation of the Qualifier interface method. 
-func (q *boolQualifier) ID() int64 { - return q.id -} - -// Qualify implements the Qualifier interface method. -func (q *boolQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) { - b := q.value - isKey := false - switch o := obj.(type) { - // The specialized map types supported by a bool qualifier are considerably fewer than the set - // of specialized map types supported by string qualifiers since they are less frequently used - // than string-based map keys. Additional specializations may be added in the future if - // desired. - case map[bool]interface{}: - obj, isKey = o[b] - case types.Unknown: - return o, nil - default: - elem, err := refResolve(q.adapter, q.celValue, obj) - if err != nil { - return nil, err - } - if types.IsUnknown(elem) { - return elem, nil - } - return elem, nil - } - if !isKey { - return nil, fmt.Errorf("no such key: %v", b) - } - return obj, nil -} - -// Value implements the ConstantQualifier interface -func (q *boolQualifier) Value() ref.Val { - return q.celValue -} - -// Cost returns zero for constant field qualifiers -func (q *boolQualifier) Cost() (min, max int64) { - return 0, 0 -} - -// fieldQualifier indicates that the qualification is a well-defined field with a known -// field type. When the field type is known this can be used to improve the speed and -// efficiency of field resolution. -type fieldQualifier struct { - id int64 - Name string - FieldType *ref.FieldType - adapter ref.TypeAdapter -} - -// ID is an implementation of the Qualifier interface method. -func (q *fieldQualifier) ID() int64 { - return q.id -} - -// Qualify implements the Qualifier interface method. -func (q *fieldQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) { - if rv, ok := obj.(ref.Val); ok { - obj = rv.Value() - } - return q.FieldType.GetFrom(obj) -} - -// Value implements the ConstantQualifier interface -func (q *fieldQualifier) Value() ref.Val { - return types.String(q.Name) -} - -// Cost returns zero for constant field qualifiers -func (q *fieldQualifier) Cost() (min, max int64) { - return 0, 0 -} - -// refResolve attempts to convert the value to a CEL value and then uses reflection methods -// to try and resolve the qualifier. -func refResolve(adapter ref.TypeAdapter, idx ref.Val, obj interface{}) (ref.Val, error) { - celVal := adapter.NativeToValue(obj) - mapper, isMapper := celVal.(traits.Mapper) - if isMapper { - elem, found := mapper.Find(idx) - if !found { - return nil, fmt.Errorf("no such key: %v", idx) - } - if types.IsError(elem) { - return nil, elem.(*types.Err) - } - return elem, nil - } - indexer, isIndexer := celVal.(traits.Indexer) - if isIndexer { - elem := indexer.Get(idx) - if types.IsError(elem) { - return nil, elem.(*types.Err) - } - return elem, nil - } - if types.IsUnknown(celVal) { - return celVal, nil - } - // TODO: If the types.Err value contains more than just an error message at some point in the - // future, then it would be reasonable to return error values as ref.Val types rather than - // simple go error types. 
- if types.IsError(celVal) { - return nil, celVal.(*types.Err) - } - return nil, errors.New("no such overload") -} diff --git a/vendor/github.com/google/cel-go/interpreter/coster.go b/vendor/github.com/google/cel-go/interpreter/coster.go deleted file mode 100644 index 15cb6bc041..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/coster.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import "math" - -// Coster calculates the heuristic cost incurred during evaluation. -type Coster interface { - Cost() (min, max int64) -} - -// estimateCost returns the heuristic cost interval for the program. -func estimateCost(i interface{}) (min, max int64) { - c, ok := i.(Coster) - if !ok { - return 0, math.MaxInt64 - } - return c.Cost() -} diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go deleted file mode 100644 index 23a7f695b2..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/decorators.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -// InterpretableDecorator is a functional interface for decorating or replacing -// Interpretable expression nodes at construction time. -type InterpretableDecorator func(Interpretable) (Interpretable, error) - -// evalObserver is a functional interface that accepts an expression id and an observed value. -type evalObserver func(int64, ref.Val) - -// decObserveEval records evaluation state into an EvalState object. -func decObserveEval(observer evalObserver) InterpretableDecorator { - return func(i Interpretable) (Interpretable, error) { - switch inst := i.(type) { - case *evalWatch, *evalWatchAttr, *evalWatchConst: - // these instruction are already watching, return straight-away. 
- return i, nil - case InterpretableAttribute: - return &evalWatchAttr{ - InterpretableAttribute: inst, - observer: observer, - }, nil - case InterpretableConst: - return &evalWatchConst{ - InterpretableConst: inst, - observer: observer, - }, nil - default: - return &evalWatch{ - Interpretable: i, - observer: observer, - }, nil - } - } -} - -// decDisableShortcircuits ensures that all branches of an expression will be evaluated, no short-circuiting. -func decDisableShortcircuits() InterpretableDecorator { - return func(i Interpretable) (Interpretable, error) { - switch expr := i.(type) { - case *evalOr: - return &evalExhaustiveOr{ - id: expr.id, - lhs: expr.lhs, - rhs: expr.rhs, - }, nil - case *evalAnd: - return &evalExhaustiveAnd{ - id: expr.id, - lhs: expr.lhs, - rhs: expr.rhs, - }, nil - case *evalFold: - return &evalExhaustiveFold{ - id: expr.id, - accu: expr.accu, - accuVar: expr.accuVar, - iterRange: expr.iterRange, - iterVar: expr.iterVar, - cond: expr.cond, - step: expr.step, - result: expr.result, - }, nil - case InterpretableAttribute: - cond, isCond := expr.Attr().(*conditionalAttribute) - if isCond { - return &evalExhaustiveConditional{ - id: cond.id, - attr: cond, - adapter: expr.Adapter(), - }, nil - } - } - return i, nil - } -} - -// decOptimize optimizes the program plan by looking for common evaluation patterns and -// conditionally precomputating the result. -// - build list and map values with constant elements. -// - convert 'in' operations to set membership tests if possible. -func decOptimize() InterpretableDecorator { - return func(i Interpretable) (Interpretable, error) { - switch inst := i.(type) { - case *evalList: - return maybeBuildListLiteral(i, inst) - case *evalMap: - return maybeBuildMapLiteral(i, inst) - case InterpretableCall: - if inst.OverloadID() == overloads.InList { - return maybeOptimizeSetMembership(i, inst) - } - if overloads.IsTypeConversionFunction(inst.Function()) { - return maybeOptimizeConstUnary(i, inst) - } - } - return i, nil - } -} - -func maybeOptimizeConstUnary(i Interpretable, call InterpretableCall) (Interpretable, error) { - args := call.Args() - if len(args) != 1 { - return i, nil - } - _, isConst := args[0].(InterpretableConst) - if !isConst { - return i, nil - } - val := call.Eval(EmptyActivation()) - if types.IsError(val) { - return nil, val.(*types.Err) - } - return NewConstValue(call.ID(), val), nil -} - -func maybeBuildListLiteral(i Interpretable, l *evalList) (Interpretable, error) { - for _, elem := range l.elems { - _, isConst := elem.(InterpretableConst) - if !isConst { - return i, nil - } - } - return NewConstValue(l.ID(), l.Eval(EmptyActivation())), nil -} - -func maybeBuildMapLiteral(i Interpretable, mp *evalMap) (Interpretable, error) { - for idx, key := range mp.keys { - _, isConst := key.(InterpretableConst) - if !isConst { - return i, nil - } - _, isConst = mp.vals[idx].(InterpretableConst) - if !isConst { - return i, nil - } - } - return NewConstValue(mp.ID(), mp.Eval(EmptyActivation())), nil -} - -// maybeOptimizeSetMembership may convert an 'in' operation against a list to map key membership -// test if the following conditions are true: -// - the list is a constant with homogeneous element types. -// - the elements are all of primitive type. 
-func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Interpretable, error) { - args := inlist.Args() - lhs := args[0] - rhs := args[1] - l, isConst := rhs.(InterpretableConst) - if !isConst { - return i, nil - } - // When the incoming binary call is flagged with as the InList overload, the value will - // always be convertible to a `traits.Lister` type. - list := l.Value().(traits.Lister) - if list.Size() == types.IntZero { - return NewConstValue(inlist.ID(), types.False), nil - } - it := list.Iterator() - var typ ref.Type - valueSet := make(map[ref.Val]ref.Val) - for it.HasNext() == types.True { - elem := it.Next() - if !types.IsPrimitiveType(elem) { - // Note, non-primitive type are not yet supported. - return i, nil - } - if typ == nil { - typ = elem.Type() - } else if typ.TypeName() != elem.Type().TypeName() { - return i, nil - } - valueSet[elem] = types.True - } - return &evalSetMembership{ - inst: inlist, - arg: lhs, - argTypeName: typ.TypeName(), - valueSet: valueSet, - }, nil -} diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go deleted file mode 100644 index febf9d8a83..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/dispatcher.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "fmt" - - "github.com/google/cel-go/interpreter/functions" -) - -// Dispatcher resolves function calls to their appropriate overload. -type Dispatcher interface { - // Add one or more overloads, returning an error if any Overload has the same Overload#Name. - Add(overloads ...*functions.Overload) error - - // FindOverload returns an Overload definition matching the provided name. - FindOverload(overload string) (*functions.Overload, bool) - - // OverloadIds returns the set of all overload identifiers configured for dispatch. - OverloadIds() []string -} - -// NewDispatcher returns an empty Dispatcher instance. -func NewDispatcher() Dispatcher { - return &defaultDispatcher{ - overloads: make(map[string]*functions.Overload)} -} - -// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and -// provides an isolation layer between built-ins and extension functions which is useful -// for forward compatibility. -func ExtendDispatcher(parent Dispatcher) Dispatcher { - return &defaultDispatcher{ - parent: parent, - overloads: make(map[string]*functions.Overload)} -} - -// overloadMap helper type for indexing overloads by function name. -type overloadMap map[string]*functions.Overload - -// defaultDispatcher struct which contains an overload map. -type defaultDispatcher struct { - parent Dispatcher - overloads overloadMap -} - -// Add implements the Dispatcher.Add interface method. 
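The maybeOptimizeSetMembership rewrite above trades a per-evaluation list scan for a set lookup whenever the right-hand side of `in` is a constant, homogeneous list of primitives. A standalone sketch of the idea; buildSet is illustrative, not the vendored code.

package main

import "fmt"

// buildSet converts a constant list into a value set once, at plan time, so
// that each later evaluation of `x in list` becomes a map lookup instead of a
// linear scan over the list elements.
func buildSet(elems []string) map[string]bool {
	set := make(map[string]bool, len(elems))
	for _, e := range elems {
		set[e] = true
	}
	return set
}

func main() {
	allowed := buildSet([]string{"GET", "HEAD", "OPTIONS"})
	fmt.Println(allowed["GET"], allowed["DELETE"]) // true false
}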
-func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error { - for _, o := range overloads { - // add the overload unless an overload of the same name has already been provided. - if _, found := d.overloads[o.Operator]; found { - return fmt.Errorf("overload already exists '%s'", o.Operator) - } - // index the overload by function name. - d.overloads[o.Operator] = o - } - return nil -} - -// FindOverload implements the Dispatcher.FindOverload interface method. -func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) { - o, found := d.overloads[overload] - // Attempt to dispatch to an overload defined in the parent. - if !found && d.parent != nil { - return d.parent.FindOverload(overload) - } - return o, found -} - -// OverloadIds implements the Dispatcher interface method. -func (d *defaultDispatcher) OverloadIds() []string { - i := 0 - overloads := make([]string, len(d.overloads)) - for name := range d.overloads { - overloads[i] = name - i++ - } - if d.parent == nil { - return overloads - } - parentOverloads := d.parent.OverloadIds() - for _, pName := range parentOverloads { - if _, found := d.overloads[pName]; !found { - overloads = append(overloads, pName) - } - } - return overloads -} diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go deleted file mode 100644 index cc0d3e6f94..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/evalstate.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "github.com/google/cel-go/common/types/ref" -) - -// EvalState tracks the values associated with expression ids during execution. -type EvalState interface { - // IDs returns the list of ids with recorded values. - IDs() []int64 - - // Value returns the observed value of the given expression id if found, and a nil false - // result if not. - Value(int64) (ref.Val, bool) - - // SetValue sets the observed value of the expression id. - SetValue(int64, ref.Val) - - // Reset clears the previously recorded expression values. - Reset() -} - -// evalState permits the mutation of evaluation state for a given expression id. -type evalState struct { - values map[int64]ref.Val -} - -// NewEvalState returns an EvalState instanced used to observe the intermediate -// evaluations of an expression. -func NewEvalState() EvalState { - return &evalState{ - values: make(map[int64]ref.Val), - } -} - -// IDs implements the EvalState interface method. -func (s *evalState) IDs() []int64 { - var ids []int64 - for k, v := range s.values { - if v != nil { - ids = append(ids, k) - } - } - return ids -} - -// Value is an implementation of the EvalState interface method. -func (s *evalState) Value(exprID int64) (ref.Val, bool) { - val, found := s.values[exprID] - return val, found -} - -// SetValue is an implementation of the EvalState interface method. 
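The EvalState interface in evalstate.go is small enough to exercise directly. A usage sketch pinned to the vendored copy shown in this hunk: the expression id 7 and the value are arbitrary choices for illustration, and in practice the observer decorator performs the SetValue calls during evaluation.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	// Record a value against an expression id, then read it back.
	st := interpreter.NewEvalState()
	st.SetValue(7, types.String("hello"))
	if v, ok := st.Value(7); ok {
		fmt.Println(v) // hello
	}
	fmt.Println(st.IDs()) // [7]
}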
-func (s *evalState) SetValue(exprID int64, val ref.Val) { - s.values[exprID] = val -} - -// Reset implements the EvalState interface method. -func (s *evalState) Reset() { - s.values = map[int64]ref.Val{} -} diff --git a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel deleted file mode 100644 index ae6eff409c..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "functions.go", - "standard.go", - ], - importpath = "github.com/google/cel-go/interpreter/functions", - deps = [ - "//common/operators:go_default_library", - "//common/overloads:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", - ] -) diff --git a/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/vendor/github.com/google/cel-go/interpreter/functions/functions.go deleted file mode 100644 index 4ca706e96c..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/functions/functions.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package functions defines the standard builtin functions supported by the -// interpreter and as declared within the checker#StandardDeclarations. -package functions - -import "github.com/google/cel-go/common/types/ref" - -// Overload defines a named overload of a function, indicating an operand trait -// which must be present on the first argument to the overload as well as one -// of either a unary, binary, or function implementation. -// -// The majority of operators within the expression language are unary or binary -// and the specializations simplify the call contract for implementers of -// types with operator overloads. Any added complexity is assumed to be handled -// by the generic FunctionOp. -type Overload struct { - // Operator name as written in an expression or defined within - // operators.go. - Operator string - - // Operand trait used to dispatch the call. The zero-value indicates a - // global function overload or that one of the Unary / Binary / Function - // definitions should be used to execute the call. - OperandTrait int - - // Unary defines the overload with a UnaryOp implementation. May be nil. - Unary UnaryOp - - // Binary defines the overload with a BinaryOp implementation. May be nil. - Binary BinaryOp - - // Function defines the overload with a FunctionOp implementation. May be - // nil. - Function FunctionOp -} - -// UnaryOp is a function that takes a single value and produces an output. -type UnaryOp func(value ref.Val) ref.Val - -// BinaryOp is a function that takes two values and produces an output. 
-type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val - -// FunctionOp is a function with accepts zero or more arguments and produces -// an value (as interface{}) or error as a result. -type FunctionOp func(values ...ref.Val) ref.Val diff --git a/vendor/github.com/google/cel-go/interpreter/functions/standard.go b/vendor/github.com/google/cel-go/interpreter/functions/standard.go deleted file mode 100644 index c8767bc176..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/functions/standard.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package functions - -import ( - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -// StandardOverloads returns the definitions of the built-in overloads. -func StandardOverloads() []*Overload { - return []*Overload{ - // Logical not (!a) - { - Operator: operators.LogicalNot, - OperandTrait: traits.NegatorType, - Unary: func(value ref.Val) ref.Val { - if !types.IsBool(value) { - return types.ValOrErr(value, "no such overload") - } - return value.(traits.Negater).Negate() - }}, - // Not strictly false: IsBool(a) ? a : true - { - Operator: operators.NotStrictlyFalse, - Unary: notStrictlyFalse}, - // Deprecated: not strictly false, may be overridden in the environment. - { - Operator: operators.OldNotStrictlyFalse, - Unary: notStrictlyFalse}, - - // Less than operator - {Operator: operators.Less, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntNegOne { - return types.True - } - if cmp == types.IntOne || cmp == types.IntZero { - return types.False - } - return cmp - }}, - - // Less than or equal operator - {Operator: operators.LessEquals, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntNegOne || cmp == types.IntZero { - return types.True - } - if cmp == types.IntOne { - return types.False - } - return cmp - }}, - - // Greater than operator - {Operator: operators.Greater, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntOne { - return types.True - } - if cmp == types.IntNegOne || cmp == types.IntZero { - return types.False - } - return cmp - }}, - - // Greater than equal operators - {Operator: operators.GreaterEquals, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntOne || cmp == types.IntZero { - return types.True - } - if cmp == types.IntNegOne { - return types.False - } - return cmp - }}, - - // TODO: Verify overflow, NaN, underflow cases for numeric values. 
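The Overload struct and the Dispatcher shown in these hunks are how function implementations reach the interpreter. A sketch of registering a custom unary overload against the vendored interfaces in this copy; the `shout` function name is made up, and newer cel-go releases may expose different APIs for this.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
	"github.com/google/cel-go/interpreter/functions"
)

func main() {
	// Start from the built-in overloads, then add a custom one.
	disp := interpreter.NewDispatcher()
	if err := disp.Add(functions.StandardOverloads()...); err != nil {
		panic(err)
	}
	err := disp.Add(&functions.Overload{
		Operator: "shout", // hypothetical function name
		Unary: func(v ref.Val) ref.Val {
			s, ok := v.(types.String)
			if !ok {
				return types.ValOrErr(v, "no such overload")
			}
			return types.String(string(s) + "!")
		},
	})
	if err != nil {
		panic(err)
	}
	o, found := disp.FindOverload("shout")
	fmt.Println(found, o.Operator) // true shout
}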
- - // Add operator - {Operator: operators.Add, - OperandTrait: traits.AdderType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Adder).Add(rhs) - }}, - - // Subtract operators - {Operator: operators.Subtract, - OperandTrait: traits.SubtractorType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Subtractor).Subtract(rhs) - }}, - - // Multiply operator - {Operator: operators.Multiply, - OperandTrait: traits.MultiplierType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Multiplier).Multiply(rhs) - }}, - - // Divide operator - {Operator: operators.Divide, - OperandTrait: traits.DividerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Divider).Divide(rhs) - }}, - - // Modulo operator - {Operator: operators.Modulo, - OperandTrait: traits.ModderType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Modder).Modulo(rhs) - }}, - - // Negate operator - {Operator: operators.Negate, - OperandTrait: traits.NegatorType, - Unary: func(value ref.Val) ref.Val { - if types.IsBool(value) { - return types.ValOrErr(value, "no such overload") - } - return value.(traits.Negater).Negate() - }}, - - // Index operator - {Operator: operators.Index, - OperandTrait: traits.IndexerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Indexer).Get(rhs) - }}, - - // Size function - {Operator: overloads.Size, - OperandTrait: traits.SizerType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Sizer).Size() - }}, - - // In operator - {Operator: operators.In, Binary: inAggregate}, - // Deprecated: in operator, may be overridden in the environment. - {Operator: operators.OldIn, Binary: inAggregate}, - - // Matches function - {Operator: overloads.Matches, - OperandTrait: traits.MatcherType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Matcher).Match(rhs) - }}, - - // Type conversion functions - // TODO: verify type conversion safety of numeric values. - - // Int conversions. - {Operator: overloads.TypeConvertInt, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.IntType) - }}, - - // Uint conversions. - {Operator: overloads.TypeConvertUint, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.UintType) - }}, - - // Double conversions. - {Operator: overloads.TypeConvertDouble, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.DoubleType) - }}, - - // Bool conversions. - {Operator: overloads.TypeConvertBool, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.BoolType) - }}, - - // Bytes conversions. - {Operator: overloads.TypeConvertBytes, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.BytesType) - }}, - - // String conversions. - {Operator: overloads.TypeConvertString, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.StringType) - }}, - - // Timestamp conversions. - {Operator: overloads.TypeConvertTimestamp, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.TimestampType) - }}, - - // Duration conversions. - {Operator: overloads.TypeConvertDuration, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.DurationType) - }}, - - // Type operations. - {Operator: overloads.TypeConvertType, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.TypeType) - }}, - - // Dyn conversion (identity function). 
- {Operator: overloads.TypeConvertDyn, - Unary: func(value ref.Val) ref.Val { - return value - }}, - - {Operator: overloads.Iterator, - OperandTrait: traits.IterableType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Iterable).Iterator() - }}, - - {Operator: overloads.HasNext, - OperandTrait: traits.IteratorType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Iterator).HasNext() - }}, - - {Operator: overloads.Next, - OperandTrait: traits.IteratorType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Iterator).Next() - }}, - } - -} - -func notStrictlyFalse(value ref.Val) ref.Val { - if types.IsBool(value) { - return value - } - return types.True -} - -func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { - if rhs.Type().HasTrait(traits.ContainerType) { - return rhs.(traits.Container).Contains(lhs) - } - return types.ValOrErr(rhs, "no such overload") -} diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go deleted file mode 100644 index 28af6d44b0..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ /dev/null @@ -1,1204 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "math" - - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/interpreter/functions" -) - -// Interpretable can accept a given Activation and produce a value along with -// an accompanying EvalState which can be used to inspect whether additional -// data might be necessary to complete the evaluation. -type Interpretable interface { - // ID value corresponding to the expression node. - ID() int64 - - // Eval an Activation to produce an output. - Eval(activation Activation) ref.Val -} - -// InterpretableConst interface for tracking whether the Interpretable is a constant value. -type InterpretableConst interface { - Interpretable - - // Value returns the constant value of the instruction. - Value() ref.Val -} - -// InterpretableAttribute interface for tracking whether the Interpretable is an attribute. -type InterpretableAttribute interface { - Interpretable - - // Attr returns the Attribute value. - Attr() Attribute - - // Adapter returns the type adapter to be used for adapting resolved Attribute values. - Adapter() ref.TypeAdapter - - // AddQualifier proxies the Attribute.AddQualifier method. - // - // Note, this method may mutate the current attribute state. If the desire is to clone the - // Attribute, the Attribute should first be copied before adding the qualifier. Attributes - // are not copyable by default, so this is a capable that would need to be added to the - // AttributeFactory or specifically to the underlying Attribute implementation. 
- AddQualifier(Qualifier) (Attribute, error) - - // Qualify replicates the Attribute.Qualify method to permit extension and interception - // of object qualification. - Qualify(vars Activation, obj interface{}) (interface{}, error) - - // Resolve returns the value of the Attribute given the current Activation. - Resolve(Activation) (interface{}, error) -} - -// InterpretableCall interface for inspecting Interpretable instructions related to function calls. -type InterpretableCall interface { - Interpretable - - // Function returns the function name as it appears in text or mangled operator name as it - // appears in the operators.go file. - Function() string - - // OverloadID returns the overload id associated with the function specialization. - // Overload ids are stable across language boundaries and can be treated as synonymous with a - // unique function signature. - OverloadID() string - - // Args returns the normalized arguments to the function overload. - // For receiver-style functions, the receiver target is arg 0. - Args() []Interpretable -} - -// Core Interpretable implementations used during the program planning phase. - -type evalTestOnly struct { - id int64 - op Interpretable - field types.String - fieldType *ref.FieldType -} - -// ID implements the Interpretable interface method. -func (test *evalTestOnly) ID() int64 { - return test.id -} - -// Eval implements the Interpretable interface method. -func (test *evalTestOnly) Eval(ctx Activation) ref.Val { - // Handle field selection on a proto in the most efficient way possible. - if test.fieldType != nil { - opAttr, ok := test.op.(InterpretableAttribute) - if ok { - opVal, err := opAttr.Resolve(ctx) - if err != nil { - return types.NewErr(err.Error()) - } - refVal, ok := opVal.(ref.Val) - if ok { - opVal = refVal.Value() - } - if test.fieldType.IsSet(opVal) { - return types.True - } - return types.False - } - } - - obj := test.op.Eval(ctx) - tester, ok := obj.(traits.FieldTester) - if ok { - return tester.IsSet(test.field) - } - container, ok := obj.(traits.Container) - if ok { - return container.Contains(test.field) - } - return types.ValOrErr(obj, "invalid type for field selection.") -} - -// Cost provides the heuristic cost of a `has(field)` macro. The cost has at least 1 for determining -// if the field exists, apart from the cost of accessing the field. -func (test *evalTestOnly) Cost() (min, max int64) { - min, max = estimateCost(test.op) - min++ - max++ - return -} - -// NewConstValue creates a new constant valued Interpretable. -func NewConstValue(id int64, val ref.Val) InterpretableConst { - return &evalConst{ - id: id, - val: val, - } -} - -type evalConst struct { - id int64 - val ref.Val -} - -// ID implements the Interpretable interface method. -func (cons *evalConst) ID() int64 { - return cons.id -} - -// Eval implements the Interpretable interface method. -func (cons *evalConst) Eval(ctx Activation) ref.Val { - return cons.val -} - -// Cost returns zero for a constant valued Interpretable. -func (cons *evalConst) Cost() (min, max int64) { - return 0, 0 -} - -// Value implements the InterpretableConst interface method. -func (cons *evalConst) Value() ref.Val { - return cons.val -} - -type evalOr struct { - id int64 - lhs Interpretable - rhs Interpretable -} - -// ID implements the Interpretable interface method. -func (or *evalOr) ID() int64 { - return or.id -} - -// Eval implements the Interpretable interface method. -func (or *evalOr) Eval(ctx Activation) ref.Val { - // short-circuit lhs. 
- lVal := or.lhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.True { - return types.True - } - // short-circuit on rhs. - rVal := or.rhs.Eval(ctx) - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.True { - return types.True - } - // return if both sides are bool false. - if lok && rok { - return types.False - } - // TODO: return both values as a set if both are unknown or error. - // prefer left unknown to right unknown. - if types.IsUnknown(lVal) { - return lVal - } - if types.IsUnknown(rVal) { - return rVal - } - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal - } - return types.ValOrErr(rVal, "no such overload") -} - -// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand -// side expr is sufficient in determining the evaluation result. -func (or *evalOr) Cost() (min, max int64) { - return calShortCircuitBinaryOpsCost(or.lhs, or.rhs) -} - -type evalAnd struct { - id int64 - lhs Interpretable - rhs Interpretable -} - -// ID implements the Interpretable interface method. -func (and *evalAnd) ID() int64 { - return and.id -} - -// Eval implements the Interpretable interface method. -func (and *evalAnd) Eval(ctx Activation) ref.Val { - // short-circuit lhs. - lVal := and.lhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.False { - return types.False - } - // short-circuit on rhs. - rVal := and.rhs.Eval(ctx) - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.False { - return types.False - } - // return if both sides are bool true. - if lok && rok { - return types.True - } - // TODO: return both values as a set if both are unknown or error. - // prefer left unknown to right unknown. - if types.IsUnknown(lVal) { - return lVal - } - if types.IsUnknown(rVal) { - return rVal - } - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal - } - return types.ValOrErr(rVal, "no such overload") -} - -// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand -// side expr is sufficient in determining the evaluation result. -func (and *evalAnd) Cost() (min, max int64) { - return calShortCircuitBinaryOpsCost(and.lhs, and.rhs) -} - -func calShortCircuitBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) { - lMin, lMax := estimateCost(lhs) - _, rMax := estimateCost(rhs) - return lMin, lMax + rMax + 1 -} - -type evalEq struct { - id int64 - lhs Interpretable - rhs Interpretable -} - -// ID implements the Interpretable interface method. -func (eq *evalEq) ID() int64 { - return eq.id -} - -// Eval implements the Interpretable interface method. -func (eq *evalEq) Eval(ctx Activation) ref.Val { - lVal := eq.lhs.Eval(ctx) - rVal := eq.rhs.Eval(ctx) - return lVal.Equal(rVal) -} - -// Cost implements the Coster interface method. -func (eq *evalEq) Cost() (min, max int64) { - return calExhaustiveBinaryOpsCost(eq.lhs, eq.rhs) -} - -// Function implements the InterpretableCall interface method. -func (*evalEq) Function() string { - return operators.Equals -} - -// OverloadID implements the InterpretableCall interface method. -func (*evalEq) OverloadID() string { - return overloads.Equals -} - -// Args implements the InterpretableCall interface method. 
-func (eq *evalEq) Args() []Interpretable { - return []Interpretable{eq.lhs, eq.rhs} -} - -type evalNe struct { - id int64 - lhs Interpretable - rhs Interpretable -} - -// ID implements the Interpretable interface method. -func (ne *evalNe) ID() int64 { - return ne.id -} - -// Eval implements the Interpretable interface method. -func (ne *evalNe) Eval(ctx Activation) ref.Val { - lVal := ne.lhs.Eval(ctx) - rVal := ne.rhs.Eval(ctx) - eqVal := lVal.Equal(rVal) - eqBool, ok := eqVal.(types.Bool) - if !ok { - return types.ValOrErr(eqVal, "no such overload: _!=_") - } - return !eqBool -} - -// Cost implements the Coster interface method. -func (ne *evalNe) Cost() (min, max int64) { - return calExhaustiveBinaryOpsCost(ne.lhs, ne.rhs) -} - -// Function implements the InterpretableCall interface method. -func (*evalNe) Function() string { - return operators.NotEquals -} - -// OverloadID implements the InterpretableCall interface method. -func (*evalNe) OverloadID() string { - return overloads.NotEquals -} - -// Args implements the InterpretableCall interface method. -func (ne *evalNe) Args() []Interpretable { - return []Interpretable{ne.lhs, ne.rhs} -} - -type evalZeroArity struct { - id int64 - function string - overload string - impl functions.FunctionOp -} - -// ID implements the Interpretable interface method. -func (zero *evalZeroArity) ID() int64 { - return zero.id -} - -// Eval implements the Interpretable interface method. -func (zero *evalZeroArity) Eval(ctx Activation) ref.Val { - return zero.impl() -} - -// Cost returns 1 representing the heuristic cost of the function. -func (zero *evalZeroArity) Cost() (min, max int64) { - return 1, 1 -} - -// Function implements the InterpretableCall interface method. -func (zero *evalZeroArity) Function() string { - return zero.function -} - -// OverloadID implements the InterpretableCall interface method. -func (zero *evalZeroArity) OverloadID() string { - return zero.overload -} - -// Args returns the argument to the unary function. -func (zero *evalZeroArity) Args() []Interpretable { - return []Interpretable{} -} - -type evalUnary struct { - id int64 - function string - overload string - arg Interpretable - trait int - impl functions.UnaryOp -} - -// ID implements the Interpretable interface method. -func (un *evalUnary) ID() int64 { - return un.id -} - -// Eval implements the Interpretable interface method. -func (un *evalUnary) Eval(ctx Activation) ref.Val { - argVal := un.arg.Eval(ctx) - // Early return if the argument to the function is unknown or error. - if types.IsUnknownOrError(argVal) { - return argVal - } - // If the implementation is bound and the argument value has the right traits required to - // invoke it, then call the implementation. - if un.impl != nil && (un.trait == 0 || argVal.Type().HasTrait(un.trait)) { - return un.impl(argVal) - } - // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the - // operand (arg0). - if argVal.Type().HasTrait(traits.ReceiverType) { - return argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{}) - } - return types.NewErr("no such overload: %s", un.function) -} - -// Cost implements the Coster interface method. -func (un *evalUnary) Cost() (min, max int64) { - min, max = estimateCost(un.arg) - min++ // add cost for function - max++ - return -} - -// Function implements the InterpretableCall interface method. -func (un *evalUnary) Function() string { - return un.function -} - -// OverloadID implements the InterpretableCall interface method. 
-func (un *evalUnary) OverloadID() string { - return un.overload -} - -// Args returns the argument to the unary function. -func (un *evalUnary) Args() []Interpretable { - return []Interpretable{un.arg} -} - -type evalBinary struct { - id int64 - function string - overload string - lhs Interpretable - rhs Interpretable - trait int - impl functions.BinaryOp -} - -// ID implements the Interpretable interface method. -func (bin *evalBinary) ID() int64 { - return bin.id -} - -// Eval implements the Interpretable interface method. -func (bin *evalBinary) Eval(ctx Activation) ref.Val { - lVal := bin.lhs.Eval(ctx) - rVal := bin.rhs.Eval(ctx) - // Early return if any argument to the function is unknown or error. - if types.IsUnknownOrError(lVal) { - return lVal - } - if types.IsUnknownOrError(rVal) { - return rVal - } - // If the implementation is bound and the argument value has the right traits required to - // invoke it, then call the implementation. - if bin.impl != nil && (bin.trait == 0 || lVal.Type().HasTrait(bin.trait)) { - return bin.impl(lVal, rVal) - } - // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the - // operand (arg0). - if lVal.Type().HasTrait(traits.ReceiverType) { - return lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal}) - } - return types.NewErr("no such overload: %s", bin.function) -} - -// Cost implements the Coster interface method. -func (bin *evalBinary) Cost() (min, max int64) { - return calExhaustiveBinaryOpsCost(bin.lhs, bin.rhs) -} - -// Function implements the InterpretableCall interface method. -func (bin *evalBinary) Function() string { - return bin.function -} - -// OverloadID implements the InterpretableCall interface method. -func (bin *evalBinary) OverloadID() string { - return bin.overload -} - -// Args returns the argument to the unary function. -func (bin *evalBinary) Args() []Interpretable { - return []Interpretable{bin.lhs, bin.rhs} -} - -type evalVarArgs struct { - id int64 - function string - overload string - args []Interpretable - trait int - impl functions.FunctionOp -} - -// ID implements the Interpretable interface method. -func (fn *evalVarArgs) ID() int64 { - return fn.id -} - -// Eval implements the Interpretable interface method. -func (fn *evalVarArgs) Eval(ctx Activation) ref.Val { - argVals := make([]ref.Val, len(fn.args)) - // Early return if any argument to the function is unknown or error. - for i, arg := range fn.args { - argVals[i] = arg.Eval(ctx) - if types.IsUnknownOrError(argVals[i]) { - return argVals[i] - } - } - // If the implementation is bound and the argument value has the right traits required to - // invoke it, then call the implementation. - arg0 := argVals[0] - if fn.impl != nil && (fn.trait == 0 || arg0.Type().HasTrait(fn.trait)) { - return fn.impl(argVals...) - } - // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the - // operand (arg0). - if arg0.Type().HasTrait(traits.ReceiverType) { - return arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:]) - } - return types.NewErr("no such overload: %s", fn.function) -} - -// Cost implements the Coster interface method. -func (fn *evalVarArgs) Cost() (min, max int64) { - min, max = sumOfCost(fn.args) - min++ // add cost for function - max++ - return -} - -// Function implements the InterpretableCall interface method. -func (fn *evalVarArgs) Function() string { - return fn.function -} - -// OverloadID implements the InterpretableCall interface method. 
-func (fn *evalVarArgs) OverloadID() string { - return fn.overload -} - -// Args returns the argument to the unary function. -func (fn *evalVarArgs) Args() []Interpretable { - return fn.args -} - -type evalList struct { - id int64 - elems []Interpretable - adapter ref.TypeAdapter -} - -// ID implements the Interpretable interface method. -func (l *evalList) ID() int64 { - return l.id -} - -// Eval implements the Interpretable interface method. -func (l *evalList) Eval(ctx Activation) ref.Val { - elemVals := make([]ref.Val, len(l.elems)) - // If any argument is unknown or error early terminate. - for i, elem := range l.elems { - elemVal := elem.Eval(ctx) - if types.IsUnknownOrError(elemVal) { - return elemVal - } - elemVals[i] = elemVal - } - return l.adapter.NativeToValue(elemVals) -} - -// Cost implements the Coster interface method. -func (l *evalList) Cost() (min, max int64) { - return sumOfCost(l.elems) -} - -type evalMap struct { - id int64 - keys []Interpretable - vals []Interpretable - adapter ref.TypeAdapter -} - -// ID implements the Interpretable interface method. -func (m *evalMap) ID() int64 { - return m.id -} - -// Eval implements the Interpretable interface method. -func (m *evalMap) Eval(ctx Activation) ref.Val { - entries := make(map[ref.Val]ref.Val) - // If any argument is unknown or error early terminate. - for i, key := range m.keys { - keyVal := key.Eval(ctx) - if types.IsUnknownOrError(keyVal) { - return keyVal - } - valVal := m.vals[i].Eval(ctx) - if types.IsUnknownOrError(valVal) { - return valVal - } - entries[keyVal] = valVal - } - return m.adapter.NativeToValue(entries) -} - -// Cost implements the Coster interface method. -func (m *evalMap) Cost() (min, max int64) { - kMin, kMax := sumOfCost(m.keys) - vMin, vMax := sumOfCost(m.vals) - return kMin + vMin, kMax + vMax -} - -type evalObj struct { - id int64 - typeName string - fields []string - vals []Interpretable - provider ref.TypeProvider -} - -// ID implements the Interpretable interface method. -func (o *evalObj) ID() int64 { - return o.id -} - -// Eval implements the Interpretable interface method. -func (o *evalObj) Eval(ctx Activation) ref.Val { - fieldVals := make(map[string]ref.Val) - // If any argument is unknown or error early terminate. - for i, field := range o.fields { - val := o.vals[i].Eval(ctx) - if types.IsUnknownOrError(val) { - return val - } - fieldVals[field] = val - } - return o.provider.NewValue(o.typeName, fieldVals) -} - -// Cost implements the Coster interface method. -func (o *evalObj) Cost() (min, max int64) { - return sumOfCost(o.vals) -} - -func sumOfCost(interps []Interpretable) (min, max int64) { - min, max = 0, 0 - for _, in := range interps { - minT, maxT := estimateCost(in) - min += minT - max += maxT - } - return -} - -type evalFold struct { - id int64 - accuVar string - iterVar string - iterRange Interpretable - accu Interpretable - cond Interpretable - step Interpretable - result Interpretable -} - -// ID implements the Interpretable interface method. -func (fold *evalFold) ID() int64 { - return fold.id -} - -// Eval implements the Interpretable interface method. -func (fold *evalFold) Eval(ctx Activation) ref.Val { - foldRange := fold.iterRange.Eval(ctx) - if !foldRange.Type().HasTrait(traits.IterableType) { - return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) - } - // Configure the fold activation with the accumulator initial value. 
- accuCtx := varActivationPool.Get().(*varActivation) - accuCtx.parent = ctx - accuCtx.name = fold.accuVar - accuCtx.val = fold.accu.Eval(ctx) - iterCtx := varActivationPool.Get().(*varActivation) - iterCtx.parent = accuCtx - iterCtx.name = fold.iterVar - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - // Modify the iter var in the fold activation. - iterCtx.val = it.Next() - - // Evaluate the condition, terminate the loop if false. - cond := fold.cond.Eval(iterCtx) - condBool, ok := cond.(types.Bool) - if !types.IsUnknown(cond) && ok && condBool != types.True { - break - } - - // Evalute the evaluation step into accu var. - accuCtx.val = fold.step.Eval(iterCtx) - } - // Compute the result. - res := fold.result.Eval(accuCtx) - varActivationPool.Put(iterCtx) - varActivationPool.Put(accuCtx) - return res -} - -// Cost implements the Coster interface method. -func (fold *evalFold) Cost() (min, max int64) { - // Compute the cost for evaluating iterRange. - iMin, iMax := estimateCost(fold.iterRange) - - // Compute the size of iterRange. If the size depends on the input, return the maximum possible - // cost range. - foldRange := fold.iterRange.Eval(EmptyActivation()) - if !foldRange.Type().HasTrait(traits.IterableType) { - return 0, math.MaxInt64 - } - var rangeCnt int64 - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - it.Next() - rangeCnt++ - } - aMin, aMax := estimateCost(fold.accu) - cMin, cMax := estimateCost(fold.cond) - sMin, sMax := estimateCost(fold.step) - rMin, rMax := estimateCost(fold.result) - - // The cond and step costs are multiplied by size(iterRange). The minimum possible cost incurs - // when the evaluation result can be determined by the first iteration. - return iMin + aMin + cMin + sMin + rMin, - iMax + aMax + cMax*rangeCnt + sMax*rangeCnt + rMax -} - -// Optional Intepretable implementations that specialize, subsume, or extend the core evaluation -// plan via decorators. - -// evalSetMembership is an Interpretable implementation which tests whether an input value -// exists within the set of map keys used to model a set. -type evalSetMembership struct { - inst Interpretable - arg Interpretable - argTypeName string - valueSet map[ref.Val]ref.Val -} - -// ID implements the Interpretable interface method. -func (e *evalSetMembership) ID() int64 { - return e.inst.ID() -} - -// Eval implements the Interpretable interface method. -func (e *evalSetMembership) Eval(ctx Activation) ref.Val { - val := e.arg.Eval(ctx) - if val.Type().TypeName() != e.argTypeName { - return types.ValOrErr(val, "no such overload") - } - if ret, found := e.valueSet[val]; found { - return ret - } - return types.False -} - -// Cost implements the Coster interface method. -func (e *evalSetMembership) Cost() (min, max int64) { - return estimateCost(e.arg) -} - -// evalWatch is an Interpretable implementation that wraps the execution of a given -// expression so that it may observe the computed value and send it to an observer. -type evalWatch struct { - Interpretable - observer evalObserver -} - -// Eval implements the Interpretable interface method. -func (e *evalWatch) Eval(ctx Activation) ref.Val { - val := e.Interpretable.Eval(ctx) - e.observer(e.ID(), val) - return val -} - -// Cost implements the Coster interface method. -func (e *evalWatch) Cost() (min, max int64) { - return estimateCost(e.Interpretable) -} - -// evalWatchAttr describes a watcher of an instAttr Interpretable. 
-// -// Since the watcher may be selected against at a later stage in program planning, the watcher -// must implement the instAttr interface by proxy. -type evalWatchAttr struct { - InterpretableAttribute - observer evalObserver -} - -// AddQualifier creates a wrapper over the incoming qualifier which observes the qualification -// result. -func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) { - cq, isConst := q.(ConstantQualifier) - if isConst { - q = &evalWatchConstQual{ - ConstantQualifier: cq, - observer: e.observer, - adapter: e.InterpretableAttribute.Adapter(), - } - } else { - q = &evalWatchQual{ - Qualifier: q, - observer: e.observer, - adapter: e.InterpretableAttribute.Adapter(), - } - } - _, err := e.InterpretableAttribute.AddQualifier(q) - return e, err -} - -// evalWatchConstQual observes the qualification of an object using a constant boolean, int, -// string, or uint. -type evalWatchConstQual struct { - ConstantQualifier - observer evalObserver - adapter ref.TypeAdapter -} - -// Cost implements the Coster interface method. -func (e *evalWatchConstQual) Cost() (min, max int64) { - return estimateCost(e.ConstantQualifier) -} - -// Qualify observes the qualification of a object via a constant boolean, int, string, or uint. -func (e *evalWatchConstQual) Qualify(vars Activation, obj interface{}) (interface{}, error) { - out, err := e.ConstantQualifier.Qualify(vars, obj) - var val ref.Val - if err != nil { - val = types.NewErr(err.Error()) - } else { - val = e.adapter.NativeToValue(out) - } - e.observer(e.ID(), val) - return out, err -} - -// QualifierValueEquals tests whether the incoming value is equal to the qualificying constant. -func (e *evalWatchConstQual) QualifierValueEquals(value interface{}) bool { - qve, ok := e.ConstantQualifier.(qualifierValueEquator) - return ok && qve.QualifierValueEquals(value) -} - -// evalWatchQual observes the qualification of an object by a value computed at runtime. -type evalWatchQual struct { - Qualifier - observer evalObserver - adapter ref.TypeAdapter -} - -// Cost implements the Coster interface method. -func (e *evalWatchQual) Cost() (min, max int64) { - return estimateCost(e.Qualifier) -} - -// Qualify observes the qualification of a object via a value computed at runtime. -func (e *evalWatchQual) Qualify(vars Activation, obj interface{}) (interface{}, error) { - out, err := e.Qualifier.Qualify(vars, obj) - var val ref.Val - if err != nil { - val = types.NewErr(err.Error()) - } else { - val = e.adapter.NativeToValue(out) - } - e.observer(e.ID(), val) - return out, err -} - -// Cost implements the Coster interface method. -func (e *evalWatchAttr) Cost() (min, max int64) { - return estimateCost(e.InterpretableAttribute) -} - -// Eval implements the Interpretable interface method. -func (e *evalWatchAttr) Eval(vars Activation) ref.Val { - val := e.InterpretableAttribute.Eval(vars) - e.observer(e.ID(), val) - return val -} - -// evalWatchConst describes a watcher of an instConst Interpretable. -type evalWatchConst struct { - InterpretableConst - observer evalObserver -} - -// Eval implements the Interpretable interface method. -func (e *evalWatchConst) Eval(vars Activation) ref.Val { - val := e.Value() - e.observer(e.ID(), val) - return val -} - -// Cost implements the Coster interface method. -func (e *evalWatchConst) Cost() (min, max int64) { - return estimateCost(e.InterpretableConst) -} - -// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation. 
-type evalExhaustiveOr struct { - id int64 - lhs Interpretable - rhs Interpretable -} - -// ID implements the Interpretable interface method. -func (or *evalExhaustiveOr) ID() int64 { - return or.id -} - -// Eval implements the Interpretable interface method. -func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val { - lVal := or.lhs.Eval(ctx) - rVal := or.rhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.True { - return types.True - } - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.True { - return types.True - } - if lok && rok { - return types.False - } - if types.IsUnknown(lVal) { - return lVal - } - if types.IsUnknown(rVal) { - return rVal - } - // TODO: Combine the errors into a set in the future. - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal - } - return types.ValOrErr(rVal, "no such overload") -} - -// Cost implements the Coster interface method. -func (or *evalExhaustiveOr) Cost() (min, max int64) { - return calExhaustiveBinaryOpsCost(or.lhs, or.rhs) -} - -// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation. -type evalExhaustiveAnd struct { - id int64 - lhs Interpretable - rhs Interpretable -} - -// ID implements the Interpretable interface method. -func (and *evalExhaustiveAnd) ID() int64 { - return and.id -} - -// Eval implements the Interpretable interface method. -func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val { - lVal := and.lhs.Eval(ctx) - rVal := and.rhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.False { - return types.False - } - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.False { - return types.False - } - if lok && rok { - return types.True - } - if types.IsUnknown(lVal) { - return lVal - } - if types.IsUnknown(rVal) { - return rVal - } - // TODO: Combine the errors into a set in the future. - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal - } - return types.ValOrErr(rVal, "no such overload") -} - -// Cost implements the Coster interface method. -func (and *evalExhaustiveAnd) Cost() (min, max int64) { - return calExhaustiveBinaryOpsCost(and.lhs, and.rhs) -} - -func calExhaustiveBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) { - lMin, lMax := estimateCost(lhs) - rMin, rMax := estimateCost(rhs) - return lMin + rMin + 1, lMax + rMax + 1 -} - -// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument -// evaluation. -type evalExhaustiveConditional struct { - id int64 - adapter ref.TypeAdapter - attr *conditionalAttribute -} - -// ID implements the Interpretable interface method. -func (cond *evalExhaustiveConditional) ID() int64 { - return cond.id -} - -// Eval implements the Interpretable interface method. -func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val { - cVal := cond.attr.expr.Eval(ctx) - tVal, err := cond.attr.truthy.Resolve(ctx) - if err != nil { - return types.NewErr(err.Error()) - } - fVal, err := cond.attr.falsy.Resolve(ctx) - if err != nil { - return types.NewErr(err.Error()) - } - cBool, ok := cVal.(types.Bool) - if !ok { - return types.ValOrErr(cVal, "no such overload") - } - if cBool { - return cond.adapter.NativeToValue(tVal) - } - return cond.adapter.NativeToValue(fVal) -} - -// Cost implements the Coster interface method. 
-func (cond *evalExhaustiveConditional) Cost() (min, max int64) { - return cond.attr.Cost() -} - -// evalExhaustiveFold is like evalFold, but does not short-circuit argument evaluation. -type evalExhaustiveFold struct { - id int64 - accuVar string - iterVar string - iterRange Interpretable - accu Interpretable - cond Interpretable - step Interpretable - result Interpretable -} - -// ID implements the Interpretable interface method. -func (fold *evalExhaustiveFold) ID() int64 { - return fold.id -} - -// Eval implements the Interpretable interface method. -func (fold *evalExhaustiveFold) Eval(ctx Activation) ref.Val { - foldRange := fold.iterRange.Eval(ctx) - if !foldRange.Type().HasTrait(traits.IterableType) { - return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) - } - // Configure the fold activation with the accumulator initial value. - accuCtx := varActivationPool.Get().(*varActivation) - accuCtx.parent = ctx - accuCtx.name = fold.accuVar - accuCtx.val = fold.accu.Eval(ctx) - iterCtx := varActivationPool.Get().(*varActivation) - iterCtx.parent = accuCtx - iterCtx.name = fold.iterVar - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - // Modify the iter var in the fold activation. - iterCtx.val = it.Next() - - // Evaluate the condition, but don't terminate the loop as this is exhaustive eval! - fold.cond.Eval(iterCtx) - - // Evalute the evaluation step into accu var. - accuCtx.val = fold.step.Eval(iterCtx) - } - // Compute the result. - res := fold.result.Eval(accuCtx) - varActivationPool.Put(iterCtx) - varActivationPool.Put(accuCtx) - return res -} - -// Cost implements the Coster interface method. -func (fold *evalExhaustiveFold) Cost() (min, max int64) { - // Compute the cost for evaluating iterRange. - iMin, iMax := estimateCost(fold.iterRange) - - // Compute the size of iterRange. If the size depends on the input, return the maximum possible - // cost range. - foldRange := fold.iterRange.Eval(EmptyActivation()) - if !foldRange.Type().HasTrait(traits.IterableType) { - return 0, math.MaxInt64 - } - var rangeCnt int64 - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - it.Next() - rangeCnt++ - } - - aMin, aMax := estimateCost(fold.accu) - cMin, cMax := estimateCost(fold.cond) - sMin, sMax := estimateCost(fold.step) - rMin, rMax := estimateCost(fold.result) - - // The cond and step costs are multiplied by size(iterRange). - return iMin + aMin + cMin*rangeCnt + sMin*rangeCnt + rMin, - iMax + aMax + cMax*rangeCnt + sMax*rangeCnt + rMax -} - -// evalAttr evaluates an Attribute value. -type evalAttr struct { - adapter ref.TypeAdapter - attr Attribute -} - -// ID of the attribute instruction. -func (a *evalAttr) ID() int64 { - return a.attr.ID() -} - -// AddQualifier implements the instAttr interface method. -func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) { - attr, err := a.attr.AddQualifier(qual) - a.attr = attr - return attr, err -} - -// Attr implements the instAttr interface method. -func (a *evalAttr) Attr() Attribute { - return a.attr -} - -// Adapter implements the instAttr interface method. -func (a *evalAttr) Adapter() ref.TypeAdapter { - return a.adapter -} - -// Cost implements the Coster interface method. -func (a *evalAttr) Cost() (min, max int64) { - return estimateCost(a.attr) -} - -// Eval implements the Interpretable interface method. 
-func (a *evalAttr) Eval(ctx Activation) ref.Val { - v, err := a.attr.Resolve(ctx) - if err != nil { - return types.NewErr(err.Error()) - } - return a.adapter.NativeToValue(v) -} - -// Qualify proxies to the Attribute's Qualify method. -func (a *evalAttr) Qualify(ctx Activation, obj interface{}) (interface{}, error) { - return a.attr.Qualify(ctx, obj) -} - -// Resolve proxies to the Attribute's Resolve method. -func (a *evalAttr) Resolve(ctx Activation) (interface{}, error) { - return a.attr.Resolve(ctx) -} diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go deleted file mode 100644 index 6ffc41efe1..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/interpreter.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package interpreter provides functions to evaluate parsed expressions with -// the option to augment the evaluation with inputs and functions supplied at -// evaluation time. -package interpreter - -import ( - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/interpreter/functions" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// Interpreter generates a new Interpretable from a checked or unchecked expression. -type Interpreter interface { - // NewInterpretable creates an Interpretable from a checked expression and an - // optional list of InterpretableDecorator values. - NewInterpretable(checked *exprpb.CheckedExpr, - decorators ...InterpretableDecorator) (Interpretable, error) - - // NewUncheckedInterpretable returns an Interpretable from a parsed expression - // and an optional list of InterpretableDecorator values. - NewUncheckedInterpretable(expr *exprpb.Expr, - decorators ...InterpretableDecorator) (Interpretable, error) -} - -// TrackState decorates each expression node with an observer which records the value -// associated with the given expression id. EvalState must be provided to the decorator. -// This decorator is not thread-safe, and the EvalState must be reset between Eval() -// calls. -func TrackState(state EvalState) InterpretableDecorator { - observer := func(id int64, val ref.Val) { - state.SetValue(id, val) - } - return decObserveEval(observer) -} - -// ExhaustiveEval replaces operations that short-circuit with versions that evaluate -// expressions and couples this behavior with the TrackState() decorator to provide -// insight into the evaluation state of the entire expression. EvalState must be -// provided to the decorator. This decorator is not thread-safe, and the EvalState -// must be reset between Eval() calls. 
-func ExhaustiveEval(state EvalState) InterpretableDecorator { - ex := decDisableShortcircuits() - obs := TrackState(state) - return func(i Interpretable) (Interpretable, error) { - var err error - i, err = ex(i) - if err != nil { - return nil, err - } - return obs(i) - } -} - -// Optimize will pre-compute operations such as list and map construction and optimize -// call arguments to set membership tests. The set of optimizations will increase over time. -func Optimize() InterpretableDecorator { - return decOptimize() -} - -type exprInterpreter struct { - dispatcher Dispatcher - container *containers.Container - provider ref.TypeProvider - adapter ref.TypeAdapter - attrFactory AttributeFactory -} - -// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used -// throughout the Eval of all Interpretable instances gerenated from it. -func NewInterpreter(dispatcher Dispatcher, - container *containers.Container, - provider ref.TypeProvider, - adapter ref.TypeAdapter, - attrFactory AttributeFactory) Interpreter { - return &exprInterpreter{ - dispatcher: dispatcher, - container: container, - provider: provider, - adapter: adapter, - attrFactory: attrFactory} -} - -// NewStandardInterpreter builds a Dispatcher and TypeProvider with support for all of the CEL -// builtins defined in the language definition. -func NewStandardInterpreter(container *containers.Container, - provider ref.TypeProvider, - adapter ref.TypeAdapter, - resolver AttributeFactory) Interpreter { - dispatcher := NewDispatcher() - dispatcher.Add(functions.StandardOverloads()...) - return NewInterpreter(dispatcher, container, provider, adapter, resolver) -} - -// NewIntepretable implements the Interpreter interface method. -func (i *exprInterpreter) NewInterpretable( - checked *exprpb.CheckedExpr, - decorators ...InterpretableDecorator) (Interpretable, error) { - p := newPlanner( - i.dispatcher, - i.provider, - i.adapter, - i.attrFactory, - i.container, - checked, - decorators...) - return p.Plan(checked.GetExpr()) -} - -// NewUncheckedIntepretable implements the Interpreter interface method. -func (i *exprInterpreter) NewUncheckedInterpretable( - expr *exprpb.Expr, - decorators ...InterpretableDecorator) (Interpretable, error) { - p := newUncheckedPlanner( - i.dispatcher, - i.provider, - i.adapter, - i.attrFactory, - i.container, - decorators...) - return p.Plan(expr) -} diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go deleted file mode 100644 index acfe31e6f8..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ /dev/null @@ -1,773 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package interpreter - -import ( - "fmt" - "strings" - - "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/interpreter/functions" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value. -type interpretablePlanner interface { - // Plan generates an Interpretable value (or error) from the input proto Expr. - Plan(expr *exprpb.Expr) (Interpretable, error) -} - -// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, -// TypeAdapter, Container, and CheckedExpr value. These pieces of data are used to resolve -// functions, types, and namespaced identifiers at plan time rather than at runtime since -// it only needs to be done once and may be semi-expensive to compute. -func newPlanner(disp Dispatcher, - provider ref.TypeProvider, - adapter ref.TypeAdapter, - attrFactory AttributeFactory, - cont *containers.Container, - checked *exprpb.CheckedExpr, - decorators ...InterpretableDecorator) interpretablePlanner { - return &planner{ - disp: disp, - provider: provider, - adapter: adapter, - attrFactory: attrFactory, - container: cont, - refMap: checked.GetReferenceMap(), - typeMap: checked.GetTypeMap(), - decorators: decorators, - } -} - -// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, -// TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in -// Select expressions are resolved lazily at evaluation time. -func newUncheckedPlanner(disp Dispatcher, - provider ref.TypeProvider, - adapter ref.TypeAdapter, - attrFactory AttributeFactory, - cont *containers.Container, - decorators ...InterpretableDecorator) interpretablePlanner { - return &planner{ - disp: disp, - provider: provider, - adapter: adapter, - attrFactory: attrFactory, - container: cont, - refMap: make(map[int64]*exprpb.Reference), - typeMap: make(map[int64]*exprpb.Type), - decorators: decorators, - } -} - -// planner is an implementatio of the interpretablePlanner interface. -type planner struct { - disp Dispatcher - provider ref.TypeProvider - adapter ref.TypeAdapter - attrFactory AttributeFactory - container *containers.Container - refMap map[int64]*exprpb.Reference - typeMap map[int64]*exprpb.Type - decorators []InterpretableDecorator -} - -// Plan implements the interpretablePlanner interface. This implementation of the Plan method also -// applies decorators to each Interpretable generated as part of the overall plan. Decorators are -// useful for layering functionality into the evaluation that is not natively understood by CEL, -// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of -// repeated expressions. 
-func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) { - switch expr.ExprKind.(type) { - case *exprpb.Expr_CallExpr: - return p.decorate(p.planCall(expr)) - case *exprpb.Expr_IdentExpr: - return p.decorate(p.planIdent(expr)) - case *exprpb.Expr_SelectExpr: - return p.decorate(p.planSelect(expr)) - case *exprpb.Expr_ListExpr: - return p.decorate(p.planCreateList(expr)) - case *exprpb.Expr_StructExpr: - return p.decorate(p.planCreateStruct(expr)) - case *exprpb.Expr_ComprehensionExpr: - return p.decorate(p.planComprehension(expr)) - case *exprpb.Expr_ConstExpr: - return p.decorate(p.planConst(expr)) - } - return nil, fmt.Errorf("unsupported expr: %v", expr) -} - -// decorate applies the InterpretableDecorator functions to the given Interpretable. -// Both the Interpretable and error generated by a Plan step are accepted as arguments -// for convenience. -func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { - if err != nil { - return nil, err - } - for _, dec := range p.decorators { - i, err = dec(i) - if err != nil { - return nil, err - } - } - return i, nil -} - -// planIdent creates an Interpretable that resolves an identifier from an Activation. -func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) { - // Establish whether the identifier is in the reference map. - if identRef, found := p.refMap[expr.GetId()]; found { - return p.planCheckedIdent(expr.GetId(), identRef) - } - // Create the possible attribute list for the unresolved reference. - ident := expr.GetIdentExpr() - return &evalAttr{ - adapter: p.adapter, - attr: p.attrFactory.MaybeAttribute(expr.GetId(), ident.Name), - }, nil -} - -func (p *planner) planCheckedIdent(id int64, identRef *exprpb.Reference) (Interpretable, error) { - // Plan a constant reference if this is the case for this simple identifier. - if identRef.Value != nil { - return p.Plan(&exprpb.Expr{Id: id, - ExprKind: &exprpb.Expr_ConstExpr{ - ConstExpr: identRef.Value, - }}) - } - - // Check to see whether the type map indicates this is a type name. All types should be - // registered with the provider. - cType := p.typeMap[id] - if cType.GetType() != nil { - cVal, found := p.provider.FindIdent(identRef.Name) - if !found { - return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name) - } - return NewConstValue(id, cVal), nil - } - - // Otherwise, return the attribute for the resolved identifier name. - return &evalAttr{ - adapter: p.adapter, - attr: p.attrFactory.AbsoluteAttribute(id, identRef.Name), - }, nil -} - -// planSelect creates an Interpretable with either: -// a) selects a field from a map or proto. -// b) creates a field presence test for a select within a has() macro. -// c) resolves the select expression to a namespaced identifier. -func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { - // If the Select id appears in the reference map from the CheckedExpr proto then it is either - // a namespaced identifier or enum value. - if identRef, found := p.refMap[expr.GetId()]; found { - return p.planCheckedIdent(expr.GetId(), identRef) - } - - sel := expr.GetSelectExpr() - // Plan the operand evaluation. - op, err := p.Plan(sel.GetOperand()) - if err != nil { - return nil, err - } - - // Determine the field type if this is a proto message type. 
- var fieldType *ref.FieldType - opType := p.typeMap[sel.GetOperand().GetId()] - if opType.GetMessageType() != "" { - ft, found := p.provider.FindFieldType(opType.GetMessageType(), sel.Field) - if found && ft.IsSet != nil && ft.GetFrom != nil { - fieldType = ft - } - } - - // If the Select was marked TestOnly, this is a presence test. - // - // Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json) - // as follows: - // - True if the object field has a non-default value, e.g. obj.str != "" - // - True if the dynamic value has the field defined, e.g. key in map - // - // However, presence tests are not defined for qualified identifier names with primitive types. - // If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`, - // it is not clear whether has should error or follow the convention defined for structured - // values. - if sel.TestOnly { - // Return the test only eval expression. - return &evalTestOnly{ - id: expr.Id, - field: types.String(sel.Field), - fieldType: fieldType, - op: op, - }, nil - } - // Build a qualifier. - qual, err := p.attrFactory.NewQualifier( - opType, expr.Id, sel.Field) - if err != nil { - return nil, err - } - // Lastly, create a field selection Interpretable. - attr, isAttr := op.(InterpretableAttribute) - if isAttr { - _, err = attr.AddQualifier(qual) - return attr, err - } - - relAttr, err := p.relativeAttr(op.ID(), op) - if err != nil { - return nil, err - } - _, err = relAttr.AddQualifier(qual) - if err != nil { - return nil, err - } - return relAttr, nil -} - -// planCall creates a callable Interpretable while specializing for common functions and invocation -// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in -// optimized Interpretable values. -func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { - call := expr.GetCallExpr() - target, fnName, oName := p.resolveFunction(expr) - argCount := len(call.GetArgs()) - var offset int - if target != nil { - argCount++ - offset++ - } - - args := make([]Interpretable, argCount) - if target != nil { - arg, err := p.Plan(target) - if err != nil { - return nil, err - } - args[0] = arg - } - for i, argExpr := range call.GetArgs() { - arg, err := p.Plan(argExpr) - if err != nil { - return nil, err - } - args[i+offset] = arg - } - - // Generate specialized Interpretable operators by function name if possible. - switch fnName { - case operators.LogicalAnd: - return p.planCallLogicalAnd(expr, args) - case operators.LogicalOr: - return p.planCallLogicalOr(expr, args) - case operators.Conditional: - return p.planCallConditional(expr, args) - case operators.Equals: - return p.planCallEqual(expr, args) - case operators.NotEquals: - return p.planCallNotEqual(expr, args) - case operators.Index: - return p.planCallIndex(expr, args) - } - - // Otherwise, generate Interpretable calls specialized by argument count. - // Try to find the specific function by overload id. - var fnDef *functions.Overload - if oName != "" { - fnDef, _ = p.disp.FindOverload(oName) - } - // If the overload id couldn't resolve the function, try the simple function name. 
- if fnDef == nil { - fnDef, _ = p.disp.FindOverload(fnName) - } - switch argCount { - case 0: - return p.planCallZero(expr, fnName, oName, fnDef) - case 1: - return p.planCallUnary(expr, fnName, oName, fnDef, args) - case 2: - return p.planCallBinary(expr, fnName, oName, fnDef, args) - default: - return p.planCallVarArgs(expr, fnName, oName, fnDef, args) - } -} - -// planCallZero generates a zero-arity callable Interpretable. -func (p *planner) planCallZero(expr *exprpb.Expr, - function string, - overload string, - impl *functions.Overload) (Interpretable, error) { - if impl == nil || impl.Function == nil { - return nil, fmt.Errorf("no such overload: %s()", function) - } - return &evalZeroArity{ - id: expr.Id, - function: function, - overload: overload, - impl: impl.Function, - }, nil -} - -// planCallUnary generates a unary callable Interpretable. -func (p *planner) planCallUnary(expr *exprpb.Expr, - function string, - overload string, - impl *functions.Overload, - args []Interpretable) (Interpretable, error) { - var fn functions.UnaryOp - var trait int - if impl != nil { - if impl.Unary == nil { - return nil, fmt.Errorf("no such overload: %s(arg)", function) - } - fn = impl.Unary - trait = impl.OperandTrait - } - return &evalUnary{ - id: expr.Id, - function: function, - overload: overload, - arg: args[0], - trait: trait, - impl: fn, - }, nil -} - -// planCallBinary generates a binary callable Interpretable. -func (p *planner) planCallBinary(expr *exprpb.Expr, - function string, - overload string, - impl *functions.Overload, - args []Interpretable) (Interpretable, error) { - var fn functions.BinaryOp - var trait int - if impl != nil { - if impl.Binary == nil { - return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function) - } - fn = impl.Binary - trait = impl.OperandTrait - } - return &evalBinary{ - id: expr.Id, - function: function, - overload: overload, - lhs: args[0], - rhs: args[1], - trait: trait, - impl: fn, - }, nil -} - -// planCallVarArgs generates a variable argument callable Interpretable. -func (p *planner) planCallVarArgs(expr *exprpb.Expr, - function string, - overload string, - impl *functions.Overload, - args []Interpretable) (Interpretable, error) { - var fn functions.FunctionOp - var trait int - if impl != nil { - if impl.Function == nil { - return nil, fmt.Errorf("no such overload: %s(...)", function) - } - fn = impl.Function - trait = impl.OperandTrait - } - return &evalVarArgs{ - id: expr.Id, - function: function, - overload: overload, - args: args, - trait: trait, - impl: fn, - }, nil -} - -// planCallEqual generates an equals (==) Interpretable. -func (p *planner) planCallEqual(expr *exprpb.Expr, - args []Interpretable) (Interpretable, error) { - return &evalEq{ - id: expr.Id, - lhs: args[0], - rhs: args[1], - }, nil -} - -// planCallNotEqual generates a not equals (!=) Interpretable. -func (p *planner) planCallNotEqual(expr *exprpb.Expr, - args []Interpretable) (Interpretable, error) { - return &evalNe{ - id: expr.Id, - lhs: args[0], - rhs: args[1], - }, nil -} - -// planCallLogicalAnd generates a logical and (&&) Interpretable. -func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, - args []Interpretable) (Interpretable, error) { - return &evalAnd{ - id: expr.Id, - lhs: args[0], - rhs: args[1], - }, nil -} - -// planCallLogicalOr generates a logical or (||) Interpretable. 
-func (p *planner) planCallLogicalOr(expr *exprpb.Expr, - args []Interpretable) (Interpretable, error) { - return &evalOr{ - id: expr.Id, - lhs: args[0], - rhs: args[1], - }, nil -} - -// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable. -func (p *planner) planCallConditional(expr *exprpb.Expr, - args []Interpretable) (Interpretable, error) { - cond := args[0] - - t := args[1] - var tAttr Attribute - truthyAttr, isTruthyAttr := t.(InterpretableAttribute) - if isTruthyAttr { - tAttr = truthyAttr.Attr() - } else { - tAttr = p.attrFactory.RelativeAttribute(t.ID(), t) - } - - f := args[2] - var fAttr Attribute - falsyAttr, isFalsyAttr := f.(InterpretableAttribute) - if isFalsyAttr { - fAttr = falsyAttr.Attr() - } else { - fAttr = p.attrFactory.RelativeAttribute(f.ID(), f) - } - - return &evalAttr{ - adapter: p.adapter, - attr: p.attrFactory.ConditionalAttribute(expr.Id, cond, tAttr, fAttr), - }, nil -} - -// planCallIndex either extends an attribute with the argument to the index operation, or creates -// a relative attribute based on the return of a function call or operation. -func (p *planner) planCallIndex(expr *exprpb.Expr, - args []Interpretable) (Interpretable, error) { - op := args[0] - ind := args[1] - opAttr, err := p.relativeAttr(op.ID(), op) - if err != nil { - return nil, err - } - opType := p.typeMap[expr.GetCallExpr().GetTarget().GetId()] - indConst, isIndConst := ind.(InterpretableConst) - if isIndConst { - qual, err := p.attrFactory.NewQualifier( - opType, expr.GetId(), indConst.Value()) - if err != nil { - return nil, err - } - _, err = opAttr.AddQualifier(qual) - return opAttr, err - } - indAttr, isIndAttr := ind.(InterpretableAttribute) - if isIndAttr { - qual, err := p.attrFactory.NewQualifier( - opType, expr.GetId(), indAttr) - if err != nil { - return nil, err - } - _, err = opAttr.AddQualifier(qual) - return opAttr, err - } - indQual, err := p.relativeAttr(expr.GetId(), ind) - if err != nil { - return nil, err - } - _, err = opAttr.AddQualifier(indQual) - return opAttr, err -} - -// planCreateList generates a list construction Interpretable. -func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { - list := expr.GetListExpr() - elems := make([]Interpretable, len(list.GetElements())) - for i, elem := range list.GetElements() { - elemVal, err := p.Plan(elem) - if err != nil { - return nil, err - } - elems[i] = elemVal - } - return &evalList{ - id: expr.Id, - elems: elems, - adapter: p.adapter, - }, nil -} - -// planCreateStruct generates a map or object construction Interpretable. -func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) { - str := expr.GetStructExpr() - if len(str.MessageName) != 0 { - return p.planCreateObj(expr) - } - entries := str.GetEntries() - keys := make([]Interpretable, len(entries)) - vals := make([]Interpretable, len(entries)) - for i, entry := range entries { - keyVal, err := p.Plan(entry.GetMapKey()) - if err != nil { - return nil, err - } - keys[i] = keyVal - - valVal, err := p.Plan(entry.GetValue()) - if err != nil { - return nil, err - } - vals[i] = valVal - } - return &evalMap{ - id: expr.Id, - keys: keys, - vals: vals, - adapter: p.adapter, - }, nil -} - -// planCreateObj generates an object construction Interpretable. 
-func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) { - obj := expr.GetStructExpr() - typeName, defined := p.resolveTypeName(obj.MessageName) - if !defined { - return nil, fmt.Errorf("unknown type: %s", typeName) - } - entries := obj.GetEntries() - fields := make([]string, len(entries)) - vals := make([]Interpretable, len(entries)) - for i, entry := range entries { - fields[i] = entry.GetFieldKey() - val, err := p.Plan(entry.GetValue()) - if err != nil { - return nil, err - } - vals[i] = val - } - return &evalObj{ - id: expr.Id, - typeName: typeName, - fields: fields, - vals: vals, - provider: p.provider, - }, nil -} - -// planComprehension generates an Interpretable fold operation. -func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) { - fold := expr.GetComprehensionExpr() - accu, err := p.Plan(fold.GetAccuInit()) - if err != nil { - return nil, err - } - iterRange, err := p.Plan(fold.GetIterRange()) - if err != nil { - return nil, err - } - cond, err := p.Plan(fold.GetLoopCondition()) - if err != nil { - return nil, err - } - step, err := p.Plan(fold.GetLoopStep()) - if err != nil { - return nil, err - } - result, err := p.Plan(fold.GetResult()) - if err != nil { - return nil, err - } - return &evalFold{ - id: expr.Id, - accuVar: fold.AccuVar, - accu: accu, - iterVar: fold.IterVar, - iterRange: iterRange, - cond: cond, - step: step, - result: result, - }, nil -} - -// planConst generates a constant valued Interpretable. -func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) { - val, err := p.constValue(expr.GetConstExpr()) - if err != nil { - return nil, err - } - return NewConstValue(expr.Id, val), nil -} - -// constValue converts a proto Constant value to a ref.Val. -func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) { - switch c.ConstantKind.(type) { - case *exprpb.Constant_BoolValue: - return types.Bool(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return types.Bytes(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return types.Double(c.GetDoubleValue()), nil - case *exprpb.Constant_DurationValue: - return types.Duration{Duration: c.GetDurationValue().AsDuration()}, nil - case *exprpb.Constant_Int64Value: - return types.Int(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return types.Null(c.GetNullValue()), nil - case *exprpb.Constant_StringValue: - return types.String(c.GetStringValue()), nil - case *exprpb.Constant_TimestampValue: - return types.Timestamp{Time: c.GetTimestampValue().AsTime()}, nil - case *exprpb.Constant_Uint64Value: - return types.Uint(c.GetUint64Value()), nil - } - return nil, fmt.Errorf("unknown constant type: %v", c) -} - -// resolveTypeName takes a qualified string constructed at parse time, applies the proto -// namespace resolution rules to it in a scan over possible matching types in the TypeProvider. -func (p *planner) resolveTypeName(typeName string) (string, bool) { - for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) { - if _, found := p.provider.FindType(qualifiedTypeName); found { - return qualifiedTypeName, true - } - } - return "", false -} - -// resolveFunction determines the call target, function name, and overload name from a given Expr -// value. -// -// The resolveFunction resolves ambiguities where a function may either be a receiver-style -// invocation or a qualified global function name. -// - The target expression may only consist of ident and select expressions. 
-// - The function is declared in the environment using its fully-qualified name. -// - The fully-qualified function name matches the string serialized target value. -func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, string) { - // Note: similar logic exists within the `checker/checker.go`. If making changes here - // please consider the impact on checker.go and consolidate implementations or mirror code - // as appropriate. - call := expr.GetCallExpr() - target := call.GetTarget() - fnName := call.GetFunction() - - // Checked expressions always have a reference map entry, and _should_ have the fully qualified - // function name as the fnName value. - oRef, hasOverload := p.refMap[expr.GetId()] - if hasOverload { - if len(oRef.GetOverloadId()) == 1 { - return target, fnName, oRef.GetOverloadId()[0] - } - // Note, this namespaced function name will not appear as a fully qualified name in ASTs - // built and stored before cel-go v0.5.0; however, this functionality did not work at all - // before the v0.5.0 release. - return target, fnName, "" - } - - // Parse-only expressions need to handle the same logic as is normally performed at check time, - // but with potentially much less information. The only reliable source of information about - // which functions are configured is the dispatcher. - if target == nil { - // If the user has a parse-only expression, then it should have been configured as such in - // the interpreter dispatcher as it may have been omitted from the checker environment. - for _, qualifiedName := range p.container.ResolveCandidateNames(fnName) { - _, found := p.disp.FindOverload(qualifiedName) - if found { - return nil, qualifiedName, "" - } - } - // It's possible that the overload was not found, but this situation is accounted for in - // the planCall phase; however, the leading dot used for denoting fully-qualified - // namespaced identifiers must be stripped, as all declarations already use fully-qualified - // names. This stripping behavior is handled automatically by the ResolveCandidateNames - // call. - return target, stripLeadingDot(fnName), "" - } - - // Handle the situation where the function target actually indicates a qualified function name. - qualifiedPrefix, maybeQualified := p.toQualifiedName(target) - if maybeQualified { - maybeQualifiedName := qualifiedPrefix + "." + fnName - for _, qualifiedName := range p.container.ResolveCandidateNames(maybeQualifiedName) { - _, found := p.disp.FindOverload(qualifiedName) - if found { - // Clear the target to ensure the proper arity is used for finding the - // implementation. - return nil, qualifiedName, "" - } - } - } - // In the default case, the function is exactly as it was advertised: a receiver call on with - // an expression-based target with the given simple function name. - return target, fnName, "" -} - -func (p *planner) relativeAttr(id int64, eval Interpretable) (InterpretableAttribute, error) { - eAttr, ok := eval.(InterpretableAttribute) - if !ok { - eAttr = &evalAttr{ - adapter: p.adapter, - attr: p.attrFactory.RelativeAttribute(id, eval), - } - } - decAttr, err := p.decorate(eAttr, nil) - if err != nil { - return nil, err - } - eAttr, ok = decAttr.(InterpretableAttribute) - if !ok { - return nil, fmt.Errorf("invalid attribute decoration: %v(%T)", decAttr, decAttr) - } - return eAttr, nil -} - -// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean -// 'found' value that indicates if the conversion is successful. 
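The candidate-name expansion that ResolveCandidateNames performs for the lookups above can be shown with a small standalone sketch (the function name below is illustrative, not the containers package API): inside container a.b.c, the simple name greet is tried as a.b.c.greet, a.b.greet, a.greet, then greet, while a leading dot forces the absolute name.

package main

import (
	"fmt"
	"strings"
)

// candidateNames lists the names a simple identifier could resolve to inside
// a container, from most to least qualified, following CEL's namespace rules.
// A leading dot marks an absolute reference and yields exactly one candidate.
func candidateNames(container, name string) []string {
	if strings.HasPrefix(name, ".") {
		return []string{name[1:]}
	}
	if container == "" {
		return []string{name}
	}
	parts := strings.Split(container, ".")
	out := make([]string, 0, len(parts)+1)
	for i := len(parts); i > 0; i-- {
		out = append(out, strings.Join(parts[:i], ".")+"."+name)
	}
	return append(out, name)
}

func main() {
	// A dispatcher-like lookup: only the fully qualified overload is registered.
	known := map[string]bool{"a.b.greet": true}
	for _, cand := range candidateNames("a.b.c", "greet") {
		if known[cand] {
			fmt.Println("resolved to", cand) // resolved to a.b.greet
			break
		}
	}
}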
-func (p *planner) toQualifiedName(operand *exprpb.Expr) (string, bool) { - // If the checker identified the expression as an attribute by the type-checker, then it can't - // possibly be part of qualified name in a namespace. - _, isAttr := p.refMap[operand.GetId()] - if isAttr { - return "", false - } - // Since functions cannot be both namespaced and receiver functions, if the operand is not an - // qualified variable name, return the (possibly) qualified name given the expressions. - return containers.ToQualifiedName(operand) -} - -func stripLeadingDot(name string) string { - if strings.HasPrefix(name, ".") { - return name[1:] - } - return name -} diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go deleted file mode 100644 index 7bcbb6c9ef..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/prune.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - structpb "google.golang.org/protobuf/types/known/structpb" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -type astPruner struct { - expr *exprpb.Expr - state EvalState - nextExprID int64 -} - -// TODO Consider having a separate walk of the AST that finds common -// subexpressions. This can be called before or after constant folding to find -// common subexpressions. - -// PruneAst prunes the given AST based on the given EvalState and generates a new AST. -// Given AST is copied on write and a new AST is returned. -// Couple of typical use cases this interface would be: -// -// A) -// 1) Evaluate expr with some unknowns, -// 2) If result is unknown: -// a) PruneAst -// b) Goto 1 -// Functional call results which are known would be effectively cached across -// iterations. -// -// B) -// 1) Compile the expression (maybe via a service and maybe after checking a -// compiled expression does not exists in local cache) -// 2) Prepare the environment and the interpreter. Activation might be empty. -// 3) Eval the expression. This might return unknown or error or a concrete -// value. -// 4) PruneAst -// 4) Maybe cache the expression -// This is effectively constant folding the expression. How the environment is -// prepared in step 2 is flexible. For example, If the caller caches the -// compiled and constant folded expressions, but is not willing to constant -// fold(and thus cache results of) some external calls, then they can prepare -// the overloads accordingly. 
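The iterate-evaluate-prune loop described in the PruneAst comment (use case A) can be sketched without any cel-go types. The toy below models an expression as a sum of terms, folds every term whose input has become known into a literal, and returns the residual; term and prune are invented names for the sketch, not the pruner's API.

package main

import "fmt"

// term is either a literal value or a reference to a not-yet-known input.
type term struct {
	known bool
	val   int
	name  string
}

// prune folds every term whose input is now known into a literal, returning
// the residual expression that still waits on the remaining inputs.
func prune(expr []term, vars map[string]int) []term {
	residual := make([]term, 0, len(expr))
	acc, hasAcc := 0, false
	for _, t := range expr {
		if !t.known {
			if v, ok := vars[t.name]; ok {
				t = term{known: true, val: v}
			}
		}
		if t.known {
			acc += t.val
			hasAcc = true
			continue
		}
		residual = append(residual, t)
	}
	if hasAcc {
		residual = append(residual, term{known: true, val: acc})
	}
	return residual
}

func main() {
	// x + 2 + y, evaluated as inputs arrive over two rounds.
	expr := []term{{name: "x"}, {known: true, val: 2}, {name: "y"}}
	expr = prune(expr, map[string]int{"x": 40}) // residual: y + 42
	expr = prune(expr, map[string]int{"y": 0})  // residual: 42
	fmt.Println(expr)
}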
-func PruneAst(expr *exprpb.Expr, state EvalState) *exprpb.Expr { - pruner := &astPruner{ - expr: expr, - state: state, - nextExprID: 1} - newExpr, _ := pruner.prune(expr) - return newExpr -} - -func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr { - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_ConstExpr{ - ConstExpr: val, - }, - } -} - -func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) { - switch val.Type() { - case types.BoolType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: val.Value().(bool)}}), true - case types.IntType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: val.Value().(int64)}}), true - case types.UintType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: val.Value().(uint64)}}), true - case types.StringType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: val.Value().(string)}}), true - case types.DoubleType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: val.Value().(float64)}}), true - case types.BytesType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: val.Value().([]byte)}}), true - case types.NullType: - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: val.Value().(structpb.NullValue)}}), true - } - - // Attempt to build a list literal. - if list, isList := val.(traits.Lister); isList { - sz := list.Size().(types.Int) - elemExprs := make([]*exprpb.Expr, sz) - for i := types.Int(0); i < sz; i++ { - elem := list.Get(i) - if types.IsUnknownOrError(elem) { - return nil, false - } - elemExpr, ok := p.maybeCreateLiteral(p.nextID(), elem) - if !ok { - return nil, false - } - elemExprs[i] = elemExpr - } - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: elemExprs, - }, - }, - }, true - } - - // Create a map literal if possible. - if mp, isMap := val.(traits.Mapper); isMap { - it := mp.Iterator() - entries := make([]*exprpb.Expr_CreateStruct_Entry, mp.Size().(types.Int)) - i := 0 - for it.HasNext() != types.False { - key := it.Next() - val := mp.Get(key) - if types.IsUnknownOrError(key) || types.IsUnknownOrError(val) { - return nil, false - } - keyExpr, ok := p.maybeCreateLiteral(p.nextID(), key) - if !ok { - return nil, false - } - valExpr, ok := p.maybeCreateLiteral(p.nextID(), val) - if !ok { - return nil, false - } - entry := &exprpb.Expr_CreateStruct_Entry{ - Id: p.nextID(), - KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: keyExpr, - }, - Value: valExpr, - } - entries[i] = entry - i++ - } - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - Entries: entries, - }, - }, - }, true - } - - // TODO(issues/377) To construct message literals, the type provider will need to support - // the enumeration the fields for a given message. - return nil, false -} - -func (p *astPruner) maybePruneAndOr(node *exprpb.Expr) (*exprpb.Expr, bool) { - if !p.existsWithUnknownValue(node.GetId()) { - return nil, false - } - - call := node.GetCallExpr() - // We know result is unknown, so we have at least one unknown arg - // and if one side is a known value, we know we can ignore it. 
- if p.existsWithKnownValue(call.Args[0].GetId()) { - return call.Args[1], true - } - if p.existsWithKnownValue(call.Args[1].GetId()) { - return call.Args[0], true - } - return nil, false -} - -func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) { - if !p.existsWithUnknownValue(node.GetId()) { - return nil, false - } - - call := node.GetCallExpr() - condVal, condValueExists := p.value(call.Args[0].GetId()) - if !condValueExists || types.IsUnknownOrError(condVal) { - return nil, false - } - - if condVal.Value().(bool) { - return call.Args[1], true - } - return call.Args[2], true -} - -func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() - if call.Function == operators.LogicalOr || call.Function == operators.LogicalAnd { - return p.maybePruneAndOr(node) - } - if call.Function == operators.Conditional { - return p.maybePruneConditional(node) - } - - return nil, false -} - -func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { - if node == nil { - return node, false - } - val, valueExists := p.value(node.GetId()) - if valueExists && !types.IsUnknownOrError(val) { - if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok { - return newNode, true - } - } - - // We have either an unknown/error value, or something we dont want to - // transform, or expression was not evaluated. If possible, drill down - // more. - - switch node.ExprKind.(type) { - case *exprpb.Expr_SelectExpr: - if operand, pruned := p.prune(node.GetSelectExpr().Operand); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{ - Operand: operand, - Field: node.GetSelectExpr().GetField(), - TestOnly: node.GetSelectExpr().GetTestOnly(), - }, - }, - }, true - } - case *exprpb.Expr_CallExpr: - if newExpr, pruned := p.maybePruneFunction(node); pruned { - newExpr, _ = p.prune(newExpr) - return newExpr, true - } - var prunedCall bool - call := node.GetCallExpr() - args := call.GetArgs() - newArgs := make([]*exprpb.Expr, len(args)) - newCall := &exprpb.Expr_Call{ - Function: call.GetFunction(), - Target: call.GetTarget(), - Args: newArgs, - } - for i, arg := range args { - newArgs[i] = arg - if newArg, prunedArg := p.prune(arg); prunedArg { - prunedCall = true - newArgs[i] = newArg - } - } - if newTarget, prunedTarget := p.prune(call.GetTarget()); prunedTarget { - prunedCall = true - newCall.Target = newTarget - } - if prunedCall { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: newCall, - }, - }, true - } - case *exprpb.Expr_ListExpr: - elems := node.GetListExpr().GetElements() - newElems := make([]*exprpb.Expr, len(elems)) - var prunedList bool - for i, elem := range elems { - newElems[i] = elem - if newElem, prunedElem := p.prune(elem); prunedElem { - newElems[i] = newElem - prunedList = true - } - } - if prunedList { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: newElems, - }, - }, - }, true - } - case *exprpb.Expr_StructExpr: - var prunedStruct bool - entries := node.GetStructExpr().GetEntries() - messageType := node.GetStructExpr().GetMessageName() - newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries)) - for i, entry := range entries { - newEntries[i] = entry - newKey, prunedKey := p.prune(entry.GetMapKey()) - newValue, prunedValue := p.prune(entry.GetValue()) - if !prunedKey && !prunedValue { - continue - } - prunedStruct = 
true - newEntry := &exprpb.Expr_CreateStruct_Entry{ - Value: newValue, - } - if messageType != "" { - newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{ - FieldKey: entry.GetFieldKey(), - } - } else { - newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: newKey, - } - } - newEntries[i] = newEntry - } - if prunedStruct { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: messageType, - Entries: newEntries, - }, - }, - }, true - } - case *exprpb.Expr_ComprehensionExpr: - compre := node.GetComprehensionExpr() - // Only the range of the comprehension is pruned since the state tracking only records - // the last iteration of the comprehension and not each step in the evaluation which - // means that the any residuals computed in between might be inaccurate. - if newRange, pruned := p.prune(compre.GetIterRange()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterVar: compre.GetIterVar(), - IterRange: newRange, - AccuVar: compre.GetAccuVar(), - AccuInit: compre.GetAccuInit(), - LoopCondition: compre.GetLoopCondition(), - LoopStep: compre.GetLoopStep(), - Result: compre.GetResult(), - }, - }, - }, true - } - } - return node, false -} - -func (p *astPruner) value(id int64) (ref.Val, bool) { - val, found := p.state.Value(id) - return val, (found && val != nil) -} - -func (p *astPruner) existsWithUnknownValue(id int64) bool { - val, valueExists := p.value(id) - return valueExists && types.IsUnknown(val) -} - -func (p *astPruner) existsWithKnownValue(id int64) bool { - val, valueExists := p.value(id) - return valueExists && !types.IsUnknown(val) -} - -func (p *astPruner) nextID() int64 { - for { - _, found := p.state.Value(p.nextExprID) - if !found { - next := p.nextExprID - p.nextExprID++ - return next - } - p.nextExprID++ - } -} diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel deleted file mode 100644 index 96b44da967..0000000000 --- a/vendor/github.com/google/cel-go/parser/BUILD.bazel +++ /dev/null @@ -1,49 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "helper.go", - "macro.go", - "parser.go", - "unescape.go", - "unparser.go", - ], - importpath = "github.com/google/cel-go/parser", - deps = [ - "//common:go_default_library", - "//common/operators:go_default_library", - "//parser/gen:go_default_library", - "@com_github_antlr//runtime/Go/antlr:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - ], - visibility = ["//visibility:public"], -) - - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "parser_test.go", - "unescape_test.go", - "unparser_test.go", - ], - embed = [ - ":go_default_library", - ], - deps = [ - "//common/debug:go_default_library", - "//parser/gen:go_default_library", - "//test:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@com_github_antlr//runtime/Go/antlr:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go deleted file mode 
100644 index 140beb9532..0000000000 --- a/vendor/github.com/google/cel-go/parser/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - - "github.com/google/cel-go/common" -) - -// parseErrors is a specialization of Errors. -type parseErrors struct { - *common.Errors -} - -func (e *parseErrors) syntaxError(l common.Location, message string) { - e.ReportError(l, fmt.Sprintf("Syntax error: %s", message)) -} - -func (e *parseErrors) invalidHasArgument(l common.Location) { - e.ReportError(l, "Argument to the function 'has' must be a field selection") -} - -func (e *parseErrors) argumentIsNotIdent(l common.Location) { - e.ReportError(l, "Argument must be a simple name") -} - -func (e *parseErrors) notAQualifiedName(l common.Location) { - e.ReportError(l, "Expected a qualified name") -} diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel deleted file mode 100644 index e301c5532c..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//parser:__subpackages__"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "cel_base_listener.go", - "cel_base_visitor.go", - "cel_lexer.go", - "cel_listener.go", - "cel_parser.go", - "cel_visitor.go", - ], - importpath = "github.com/google/cel-go/parser/gen", - deps = [ - "@com_github_antlr//runtime/Go/antlr:go_default_library", - ], - data = [ - "CEL.tokens", - "CELLexer.tokens", - ], -) diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/vendor/github.com/google/cel-go/parser/gen/CEL.g4 deleted file mode 100644 index 7928120289..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -grammar CEL; - -// Grammar Rules -// ============= - -start - : e=expr EOF - ; - -expr - : e=conditionalOr (op='?' e1=conditionalOr ':' e2=expr)? 
- ; - -conditionalOr - : e=conditionalAnd (ops+='||' e1+=conditionalAnd)* - ; - -conditionalAnd - : e=relation (ops+='&&' e1+=relation)* - ; - -relation - : calc - | relation op=('<'|'<='|'>='|'>'|'=='|'!='|'in') relation - ; - -calc - : unary - | calc op=('*'|'/'|'%') calc - | calc op=('+'|'-') calc - ; - -unary - : member # MemberExpr - | (ops+='!')+ member # LogicalNot - | (ops+='-')+ member # Negate - ; - -member - : primary # PrimaryExpr - | member op='.' id=IDENTIFIER (open='(' args=exprList? ')')? # SelectOrCall - | member op='[' index=expr ']' # Index - | member op='{' entries=fieldInitializerList? ','? '}' # CreateMessage - ; - -primary - : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall - | '(' e=expr ')' # Nested - | op='[' elems=exprList? ','? ']' # CreateList - | op='{' entries=mapInitializerList? ','? '}' # CreateStruct - | literal # ConstantLiteral - ; - -exprList - : e+=expr (',' e+=expr)* - ; - -fieldInitializerList - : fields+=IDENTIFIER cols+=':' values+=expr (',' fields+=IDENTIFIER cols+=':' values+=expr)* - ; - -mapInitializerList - : keys+=expr cols+=':' values+=expr (',' keys+=expr cols+=':' values+=expr)* - ; - -literal - : sign=MINUS? tok=NUM_INT # Int - | tok=NUM_UINT # Uint - | sign=MINUS? tok=NUM_FLOAT # Double - | tok=STRING # String - | tok=BYTES # Bytes - | tok='true' # BoolTrue - | tok='false' # BoolFalse - | tok='null' # Null - ; - -// Lexer Rules -// =========== - -EQUALS : '=='; -NOT_EQUALS : '!='; -IN: 'in'; -LESS : '<'; -LESS_EQUALS : '<='; -GREATER_EQUALS : '>='; -GREATER : '>'; -LOGICAL_AND : '&&'; -LOGICAL_OR : '||'; - -LBRACKET : '['; -RPRACKET : ']'; -LBRACE : '{'; -RBRACE : '}'; -LPAREN : '('; -RPAREN : ')'; -DOT : '.'; -COMMA : ','; -MINUS : '-'; -EXCLAM : '!'; -QUESTIONMARK : '?'; -COLON : ':'; -PLUS : '+'; -STAR : '*'; -SLASH : '/'; -PERCENT : '%'; -TRUE : 'true'; -FALSE : 'false'; -NULL : 'null'; - -fragment BACKSLASH : '\\'; -fragment LETTER : 'A'..'Z' | 'a'..'z' ; -fragment DIGIT : '0'..'9' ; -fragment EXPONENT : ('e' | 'E') ( '+' | '-' )? DIGIT+ ; -fragment HEXDIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ; -fragment RAW : 'r' | 'R'; - -fragment ESC_SEQ - : ESC_CHAR_SEQ - | ESC_BYTE_SEQ - | ESC_UNI_SEQ - | ESC_OCT_SEQ - ; - -fragment ESC_CHAR_SEQ - : BACKSLASH ('a'|'b'|'f'|'n'|'r'|'t'|'v'|'"'|'\''|'\\'|'?'|'`') - ; - -fragment ESC_OCT_SEQ - : BACKSLASH ('0'..'3') ('0'..'7') ('0'..'7') - ; - -fragment ESC_BYTE_SEQ - : BACKSLASH ( 'x' | 'X' ) HEXDIGIT HEXDIGIT - ; - -fragment ESC_UNI_SEQ - : BACKSLASH 'u' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT - | BACKSLASH 'U' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT - ; - -WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ -> channel(HIDDEN) ; -COMMENT : '//' (~'\n')* -> channel(HIDDEN) ; - -NUM_FLOAT - : ( DIGIT+ ('.' DIGIT+) EXPONENT? - | DIGIT+ EXPONENT - | '.' DIGIT+ EXPONENT? - ) - ; - -NUM_INT - : ( DIGIT+ | '0x' HEXDIGIT+ ); - -NUM_UINT - : DIGIT+ ( 'u' | 'U' ) - | '0x' HEXDIGIT+ ( 'u' | 'U' ) - ; - -STRING - : '"' (ESC_SEQ | ~('\\'|'"'|'\n'|'\r'))* '"' - | '\'' (ESC_SEQ | ~('\\'|'\''|'\n'|'\r'))* '\'' - | '"""' (ESC_SEQ | ~('\\'))*? '"""' - | '\'\'\'' (ESC_SEQ | ~('\\'))*? '\'\'\'' - | RAW '"' ~('"'|'\n'|'\r')* '"' - | RAW '\'' ~('\''|'\n'|'\r')* '\'' - | RAW '"""' .*? '"""' - | RAW '\'\'\'' .*? 
'\'\'\'' - ; - -BYTES : ('b' | 'B') STRING; - -IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*; diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens deleted file mode 100644 index c99e4c0210..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens +++ /dev/null @@ -1,64 +0,0 @@ -EQUALS=1 -NOT_EQUALS=2 -IN=3 -LESS=4 -LESS_EQUALS=5 -GREATER_EQUALS=6 -GREATER=7 -LOGICAL_AND=8 -LOGICAL_OR=9 -LBRACKET=10 -RPRACKET=11 -LBRACE=12 -RBRACE=13 -LPAREN=14 -RPAREN=15 -DOT=16 -COMMA=17 -MINUS=18 -EXCLAM=19 -QUESTIONMARK=20 -COLON=21 -PLUS=22 -STAR=23 -SLASH=24 -PERCENT=25 -TRUE=26 -FALSE=27 -NULL=28 -WHITESPACE=29 -COMMENT=30 -NUM_FLOAT=31 -NUM_INT=32 -NUM_UINT=33 -STRING=34 -BYTES=35 -IDENTIFIER=36 -'=='=1 -'!='=2 -'in'=3 -'<'=4 -'<='=5 -'>='=6 -'>'=7 -'&&'=8 -'||'=9 -'['=10 -']'=11 -'{'=12 -'}'=13 -'('=14 -')'=15 -'.'=16 -','=17 -'-'=18 -'!'=19 -'?'=20 -':'=21 -'+'=22 -'*'=23 -'/'=24 -'%'=25 -'true'=26 -'false'=27 -'null'=28 diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens deleted file mode 100644 index c99e4c0210..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens +++ /dev/null @@ -1,64 +0,0 @@ -EQUALS=1 -NOT_EQUALS=2 -IN=3 -LESS=4 -LESS_EQUALS=5 -GREATER_EQUALS=6 -GREATER=7 -LOGICAL_AND=8 -LOGICAL_OR=9 -LBRACKET=10 -RPRACKET=11 -LBRACE=12 -RBRACE=13 -LPAREN=14 -RPAREN=15 -DOT=16 -COMMA=17 -MINUS=18 -EXCLAM=19 -QUESTIONMARK=20 -COLON=21 -PLUS=22 -STAR=23 -SLASH=24 -PERCENT=25 -TRUE=26 -FALSE=27 -NULL=28 -WHITESPACE=29 -COMMENT=30 -NUM_FLOAT=31 -NUM_INT=32 -NUM_UINT=33 -STRING=34 -BYTES=35 -IDENTIFIER=36 -'=='=1 -'!='=2 -'in'=3 -'<'=4 -'<='=5 -'>='=6 -'>'=7 -'&&'=8 -'||'=9 -'['=10 -']'=11 -'{'=12 -'}'=13 -'('=14 -')'=15 -'.'=16 -','=17 -'-'=18 -'!'=19 -'?'=20 -':'=21 -'+'=22 -'*'=23 -'/'=24 -'%'=25 -'true'=26 -'false'=27 -'null'=28 diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go deleted file mode 100644 index 94f6c58812..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go +++ /dev/null @@ -1,195 +0,0 @@ -// Generated from /Users/tswadell/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7. - -package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr" - -// BaseCELListener is a complete listener for a parse tree produced by CELParser. -type BaseCELListener struct{} - -var _ CELListener = &BaseCELListener{} - -// VisitTerminal is called when a terminal node is visited. -func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {} - -// VisitErrorNode is called when an error node is visited. -func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {} - -// EnterEveryRule is called when any rule is entered. -func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {} - -// ExitEveryRule is called when any rule is exited. -func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {} - -// EnterStart is called when production start is entered. -func (s *BaseCELListener) EnterStart(ctx *StartContext) {} - -// ExitStart is called when production start is exited. -func (s *BaseCELListener) ExitStart(ctx *StartContext) {} - -// EnterExpr is called when production expr is entered. -func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {} - -// ExitExpr is called when production expr is exited. 
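The grammar above encodes precedence purely through rule layering: expr → conditionalOr → conditionalAnd → relation → calc → unary → member → primary runs from the loosest-binding operator (the ternary) to the tightest (selection and indexing). A minimal hand-written sketch of the same idea, covering only the arithmetic layers and using invented names, looks like this:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parser shows how a layered grammar encodes precedence: each rule asks the
// next-tighter rule for its operands, so '*' binds more tightly than '+'
// without any explicit precedence table.
type parser struct {
	toks []string
	pos  int
}

func (p *parser) peek() string {
	if p.pos < len(p.toks) {
		return p.toks[p.pos]
	}
	return ""
}

func (p *parser) next() string { t := p.peek(); p.pos++; return t }

// parseAdd plays the role of calc's '+'/'-' alternative.
func (p *parser) parseAdd() int {
	v := p.parseMul()
	for p.peek() == "+" || p.peek() == "-" {
		if p.next() == "+" {
			v += p.parseMul()
		} else {
			v -= p.parseMul()
		}
	}
	return v
}

// parseMul plays the role of calc's '*' alternative, one level tighter.
func (p *parser) parseMul() int {
	v := p.parsePrimary()
	for p.peek() == "*" {
		p.next()
		v *= p.parsePrimary()
	}
	return v
}

// parsePrimary plays the role of the primary rule: literals and parentheses.
func (p *parser) parsePrimary() int {
	if p.peek() == "(" {
		p.next()
		v := p.parseAdd()
		p.next() // consume ")"
		return v
	}
	n, _ := strconv.Atoi(p.next())
	return n
}

func main() {
	p := &parser{toks: strings.Fields("1 + 2 * ( 3 + 4 )")}
	fmt.Println(p.parseAdd()) // 15
}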
-func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {} - -// EnterConditionalOr is called when production conditionalOr is entered. -func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {} - -// ExitConditionalOr is called when production conditionalOr is exited. -func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {} - -// EnterConditionalAnd is called when production conditionalAnd is entered. -func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {} - -// ExitConditionalAnd is called when production conditionalAnd is exited. -func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {} - -// EnterRelation is called when production relation is entered. -func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {} - -// ExitRelation is called when production relation is exited. -func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {} - -// EnterCalc is called when production calc is entered. -func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {} - -// ExitCalc is called when production calc is exited. -func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {} - -// EnterMemberExpr is called when production MemberExpr is entered. -func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {} - -// ExitMemberExpr is called when production MemberExpr is exited. -func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {} - -// EnterLogicalNot is called when production LogicalNot is entered. -func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {} - -// ExitLogicalNot is called when production LogicalNot is exited. -func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {} - -// EnterNegate is called when production Negate is entered. -func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {} - -// ExitNegate is called when production Negate is exited. -func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {} - -// EnterSelectOrCall is called when production SelectOrCall is entered. -func (s *BaseCELListener) EnterSelectOrCall(ctx *SelectOrCallContext) {} - -// ExitSelectOrCall is called when production SelectOrCall is exited. -func (s *BaseCELListener) ExitSelectOrCall(ctx *SelectOrCallContext) {} - -// EnterPrimaryExpr is called when production PrimaryExpr is entered. -func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {} - -// ExitPrimaryExpr is called when production PrimaryExpr is exited. -func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {} - -// EnterIndex is called when production Index is entered. -func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {} - -// ExitIndex is called when production Index is exited. -func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {} - -// EnterCreateMessage is called when production CreateMessage is entered. -func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {} - -// ExitCreateMessage is called when production CreateMessage is exited. -func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {} - -// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered. -func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} - -// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited. -func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} - -// EnterNested is called when production Nested is entered. 
-func (s *BaseCELListener) EnterNested(ctx *NestedContext) {} - -// ExitNested is called when production Nested is exited. -func (s *BaseCELListener) ExitNested(ctx *NestedContext) {} - -// EnterCreateList is called when production CreateList is entered. -func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {} - -// ExitCreateList is called when production CreateList is exited. -func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {} - -// EnterCreateStruct is called when production CreateStruct is entered. -func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {} - -// ExitCreateStruct is called when production CreateStruct is exited. -func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {} - -// EnterConstantLiteral is called when production ConstantLiteral is entered. -func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {} - -// ExitConstantLiteral is called when production ConstantLiteral is exited. -func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {} - -// EnterExprList is called when production exprList is entered. -func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {} - -// ExitExprList is called when production exprList is exited. -func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {} - -// EnterFieldInitializerList is called when production fieldInitializerList is entered. -func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {} - -// ExitFieldInitializerList is called when production fieldInitializerList is exited. -func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {} - -// EnterMapInitializerList is called when production mapInitializerList is entered. -func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {} - -// ExitMapInitializerList is called when production mapInitializerList is exited. -func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {} - -// EnterInt is called when production Int is entered. -func (s *BaseCELListener) EnterInt(ctx *IntContext) {} - -// ExitInt is called when production Int is exited. -func (s *BaseCELListener) ExitInt(ctx *IntContext) {} - -// EnterUint is called when production Uint is entered. -func (s *BaseCELListener) EnterUint(ctx *UintContext) {} - -// ExitUint is called when production Uint is exited. -func (s *BaseCELListener) ExitUint(ctx *UintContext) {} - -// EnterDouble is called when production Double is entered. -func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {} - -// ExitDouble is called when production Double is exited. -func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {} - -// EnterString is called when production String is entered. -func (s *BaseCELListener) EnterString(ctx *StringContext) {} - -// ExitString is called when production String is exited. -func (s *BaseCELListener) ExitString(ctx *StringContext) {} - -// EnterBytes is called when production Bytes is entered. -func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {} - -// ExitBytes is called when production Bytes is exited. -func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {} - -// EnterBoolTrue is called when production BoolTrue is entered. -func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {} - -// ExitBoolTrue is called when production BoolTrue is exited. 
-func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {} - -// EnterBoolFalse is called when production BoolFalse is entered. -func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {} - -// ExitBoolFalse is called when production BoolFalse is exited. -func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {} - -// EnterNull is called when production Null is entered. -func (s *BaseCELListener) EnterNull(ctx *NullContext) {} - -// ExitNull is called when production Null is exited. -func (s *BaseCELListener) ExitNull(ctx *NullContext) {} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go deleted file mode 100644 index bb46a64d5e..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go +++ /dev/null @@ -1,124 +0,0 @@ -// Generated from /Users/tswadell/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7. - -package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr" - -type BaseCELVisitor struct { - *antlr.BaseParseTreeVisitor -} - -func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitSelectOrCall(ctx *SelectOrCallContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v 
*BaseCELVisitor) VisitInt(ctx *IntContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitBoolTrue(ctx *BoolTrueContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} { - return v.VisitChildren(ctx) -} - -func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} { - return v.VisitChildren(ctx) -} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go deleted file mode 100644 index b76f78a650..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go +++ /dev/null @@ -1,319 +0,0 @@ -// Generated from /Users/tswadell/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7. - -package gen - -import ( - "fmt" - "unicode" - - "github.com/antlr/antlr4/runtime/Go/antlr" -) - -// Suppress unused import error -var _ = fmt.Printf -var _ = unicode.IsLetter - -var serializedLexerAtn = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 38, 425, - 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, - 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, - 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, - 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, - 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, - 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, - 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, - 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, - 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 3, 2, 3, - 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, - 6, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, - 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, - 16, 3, 16, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, - 3, 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, - 26, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, - 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, - 32, 3, 32, 3, 33, 3, 33, 5, 33, 179, 10, 33, 3, 33, 6, 33, 182, 10, 33, - 13, 33, 14, 33, 183, 3, 34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 36, 3, - 36, 5, 36, 194, 10, 36, 3, 37, 3, 37, 3, 37, 3, 38, 3, 38, 3, 38, 3, 38, - 3, 38, 3, 39, 3, 39, 3, 39, 3, 39, 3, 39, 3, 40, 3, 40, 3, 40, 3, 40, 3, - 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, - 3, 40, 3, 40, 3, 40, 5, 40, 227, 10, 40, 3, 41, 6, 41, 230, 10, 41, 13, - 41, 14, 41, 231, 3, 41, 3, 41, 3, 42, 3, 42, 3, 42, 3, 42, 7, 42, 240, - 10, 42, 12, 42, 14, 42, 243, 11, 42, 3, 42, 3, 42, 3, 43, 6, 43, 248, 10, - 43, 13, 43, 14, 43, 249, 3, 43, 3, 43, 6, 43, 254, 10, 43, 13, 43, 14, - 43, 255, 3, 43, 5, 43, 259, 10, 43, 3, 43, 6, 43, 262, 10, 43, 13, 43, - 14, 43, 263, 3, 43, 3, 43, 3, 43, 3, 43, 6, 43, 270, 10, 43, 13, 43, 14, - 43, 271, 3, 43, 5, 43, 
275, 10, 43, 5, 43, 277, 10, 43, 3, 44, 6, 44, 280, - 10, 44, 13, 44, 14, 44, 281, 3, 44, 3, 44, 3, 44, 3, 44, 6, 44, 288, 10, - 44, 13, 44, 14, 44, 289, 5, 44, 292, 10, 44, 3, 45, 6, 45, 295, 10, 45, - 13, 45, 14, 45, 296, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 6, 45, 305, - 10, 45, 13, 45, 14, 45, 306, 3, 45, 3, 45, 5, 45, 311, 10, 45, 3, 46, 3, - 46, 3, 46, 7, 46, 316, 10, 46, 12, 46, 14, 46, 319, 11, 46, 3, 46, 3, 46, - 3, 46, 3, 46, 7, 46, 325, 10, 46, 12, 46, 14, 46, 328, 11, 46, 3, 46, 3, - 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, 46, 337, 10, 46, 12, 46, 14, - 46, 340, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, - 3, 46, 7, 46, 351, 10, 46, 12, 46, 14, 46, 354, 11, 46, 3, 46, 3, 46, 3, - 46, 3, 46, 3, 46, 3, 46, 7, 46, 362, 10, 46, 12, 46, 14, 46, 365, 11, 46, - 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, 46, 372, 10, 46, 12, 46, 14, 46, - 375, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, - 46, 385, 10, 46, 12, 46, 14, 46, 388, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, - 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, 46, 400, 10, 46, 12, 46, 14, - 46, 403, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 409, 10, 46, 3, 47, - 3, 47, 3, 47, 3, 48, 3, 48, 5, 48, 416, 10, 48, 3, 48, 3, 48, 3, 48, 7, - 48, 421, 10, 48, 12, 48, 14, 48, 424, 11, 48, 6, 338, 352, 386, 401, 2, - 49, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, - 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, - 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, - 59, 2, 61, 2, 63, 2, 65, 2, 67, 2, 69, 2, 71, 2, 73, 2, 75, 2, 77, 2, 79, - 2, 81, 31, 83, 32, 85, 33, 87, 34, 89, 35, 91, 36, 93, 37, 95, 38, 3, 2, - 18, 4, 2, 67, 92, 99, 124, 4, 2, 71, 71, 103, 103, 4, 2, 45, 45, 47, 47, - 5, 2, 50, 59, 67, 72, 99, 104, 4, 2, 84, 84, 116, 116, 12, 2, 36, 36, 41, - 41, 65, 65, 94, 94, 98, 100, 104, 104, 112, 112, 116, 116, 118, 118, 120, - 120, 4, 2, 90, 90, 122, 122, 5, 2, 11, 12, 14, 15, 34, 34, 3, 2, 12, 12, - 4, 2, 87, 87, 119, 119, 6, 2, 12, 12, 15, 15, 36, 36, 94, 94, 6, 2, 12, - 12, 15, 15, 41, 41, 94, 94, 3, 2, 94, 94, 5, 2, 12, 12, 15, 15, 36, 36, - 5, 2, 12, 12, 15, 15, 41, 41, 4, 2, 68, 68, 100, 100, 2, 458, 2, 3, 3, - 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, - 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, - 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, - 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, - 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, - 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, - 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, - 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87, - 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2, - 95, 3, 2, 2, 2, 3, 97, 3, 2, 2, 2, 5, 100, 3, 2, 2, 2, 7, 103, 3, 2, 2, - 2, 9, 106, 3, 2, 2, 2, 11, 108, 3, 2, 2, 2, 13, 111, 3, 2, 2, 2, 15, 114, - 3, 2, 2, 2, 17, 116, 3, 2, 2, 2, 19, 119, 3, 2, 2, 2, 21, 122, 3, 2, 2, - 2, 23, 124, 3, 2, 2, 2, 25, 126, 3, 2, 2, 2, 27, 128, 3, 2, 2, 2, 29, 130, - 3, 2, 2, 2, 31, 132, 3, 2, 2, 2, 33, 134, 3, 2, 2, 2, 35, 136, 3, 2, 2, - 2, 37, 138, 3, 2, 2, 2, 39, 140, 3, 2, 2, 2, 41, 142, 3, 2, 2, 2, 43, 144, - 3, 2, 2, 2, 45, 146, 3, 2, 2, 2, 47, 148, 3, 2, 2, 2, 49, 150, 3, 2, 2, - 2, 51, 152, 3, 2, 2, 2, 53, 154, 3, 2, 2, 2, 55, 159, 3, 2, 2, 2, 57, 165, - 3, 2, 2, 2, 59, 170, 3, 2, 2, 2, 61, 172, 3, 2, 2, 2, 
63, 174, 3, 2, 2, - 2, 65, 176, 3, 2, 2, 2, 67, 185, 3, 2, 2, 2, 69, 187, 3, 2, 2, 2, 71, 193, - 3, 2, 2, 2, 73, 195, 3, 2, 2, 2, 75, 198, 3, 2, 2, 2, 77, 203, 3, 2, 2, - 2, 79, 226, 3, 2, 2, 2, 81, 229, 3, 2, 2, 2, 83, 235, 3, 2, 2, 2, 85, 276, - 3, 2, 2, 2, 87, 291, 3, 2, 2, 2, 89, 310, 3, 2, 2, 2, 91, 408, 3, 2, 2, - 2, 93, 410, 3, 2, 2, 2, 95, 415, 3, 2, 2, 2, 97, 98, 7, 63, 2, 2, 98, 99, - 7, 63, 2, 2, 99, 4, 3, 2, 2, 2, 100, 101, 7, 35, 2, 2, 101, 102, 7, 63, - 2, 2, 102, 6, 3, 2, 2, 2, 103, 104, 7, 107, 2, 2, 104, 105, 7, 112, 2, - 2, 105, 8, 3, 2, 2, 2, 106, 107, 7, 62, 2, 2, 107, 10, 3, 2, 2, 2, 108, - 109, 7, 62, 2, 2, 109, 110, 7, 63, 2, 2, 110, 12, 3, 2, 2, 2, 111, 112, - 7, 64, 2, 2, 112, 113, 7, 63, 2, 2, 113, 14, 3, 2, 2, 2, 114, 115, 7, 64, - 2, 2, 115, 16, 3, 2, 2, 2, 116, 117, 7, 40, 2, 2, 117, 118, 7, 40, 2, 2, - 118, 18, 3, 2, 2, 2, 119, 120, 7, 126, 2, 2, 120, 121, 7, 126, 2, 2, 121, - 20, 3, 2, 2, 2, 122, 123, 7, 93, 2, 2, 123, 22, 3, 2, 2, 2, 124, 125, 7, - 95, 2, 2, 125, 24, 3, 2, 2, 2, 126, 127, 7, 125, 2, 2, 127, 26, 3, 2, 2, - 2, 128, 129, 7, 127, 2, 2, 129, 28, 3, 2, 2, 2, 130, 131, 7, 42, 2, 2, - 131, 30, 3, 2, 2, 2, 132, 133, 7, 43, 2, 2, 133, 32, 3, 2, 2, 2, 134, 135, - 7, 48, 2, 2, 135, 34, 3, 2, 2, 2, 136, 137, 7, 46, 2, 2, 137, 36, 3, 2, - 2, 2, 138, 139, 7, 47, 2, 2, 139, 38, 3, 2, 2, 2, 140, 141, 7, 35, 2, 2, - 141, 40, 3, 2, 2, 2, 142, 143, 7, 65, 2, 2, 143, 42, 3, 2, 2, 2, 144, 145, - 7, 60, 2, 2, 145, 44, 3, 2, 2, 2, 146, 147, 7, 45, 2, 2, 147, 46, 3, 2, - 2, 2, 148, 149, 7, 44, 2, 2, 149, 48, 3, 2, 2, 2, 150, 151, 7, 49, 2, 2, - 151, 50, 3, 2, 2, 2, 152, 153, 7, 39, 2, 2, 153, 52, 3, 2, 2, 2, 154, 155, - 7, 118, 2, 2, 155, 156, 7, 116, 2, 2, 156, 157, 7, 119, 2, 2, 157, 158, - 7, 103, 2, 2, 158, 54, 3, 2, 2, 2, 159, 160, 7, 104, 2, 2, 160, 161, 7, - 99, 2, 2, 161, 162, 7, 110, 2, 2, 162, 163, 7, 117, 2, 2, 163, 164, 7, - 103, 2, 2, 164, 56, 3, 2, 2, 2, 165, 166, 7, 112, 2, 2, 166, 167, 7, 119, - 2, 2, 167, 168, 7, 110, 2, 2, 168, 169, 7, 110, 2, 2, 169, 58, 3, 2, 2, - 2, 170, 171, 7, 94, 2, 2, 171, 60, 3, 2, 2, 2, 172, 173, 9, 2, 2, 2, 173, - 62, 3, 2, 2, 2, 174, 175, 4, 50, 59, 2, 175, 64, 3, 2, 2, 2, 176, 178, - 9, 3, 2, 2, 177, 179, 9, 4, 2, 2, 178, 177, 3, 2, 2, 2, 178, 179, 3, 2, - 2, 2, 179, 181, 3, 2, 2, 2, 180, 182, 5, 63, 32, 2, 181, 180, 3, 2, 2, - 2, 182, 183, 3, 2, 2, 2, 183, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, - 66, 3, 2, 2, 2, 185, 186, 9, 5, 2, 2, 186, 68, 3, 2, 2, 2, 187, 188, 9, - 6, 2, 2, 188, 70, 3, 2, 2, 2, 189, 194, 5, 73, 37, 2, 190, 194, 5, 77, - 39, 2, 191, 194, 5, 79, 40, 2, 192, 194, 5, 75, 38, 2, 193, 189, 3, 2, - 2, 2, 193, 190, 3, 2, 2, 2, 193, 191, 3, 2, 2, 2, 193, 192, 3, 2, 2, 2, - 194, 72, 3, 2, 2, 2, 195, 196, 5, 59, 30, 2, 196, 197, 9, 7, 2, 2, 197, - 74, 3, 2, 2, 2, 198, 199, 5, 59, 30, 2, 199, 200, 4, 50, 53, 2, 200, 201, - 4, 50, 57, 2, 201, 202, 4, 50, 57, 2, 202, 76, 3, 2, 2, 2, 203, 204, 5, - 59, 30, 2, 204, 205, 9, 8, 2, 2, 205, 206, 5, 67, 34, 2, 206, 207, 5, 67, - 34, 2, 207, 78, 3, 2, 2, 2, 208, 209, 5, 59, 30, 2, 209, 210, 7, 119, 2, - 2, 210, 211, 5, 67, 34, 2, 211, 212, 5, 67, 34, 2, 212, 213, 5, 67, 34, - 2, 213, 214, 5, 67, 34, 2, 214, 227, 3, 2, 2, 2, 215, 216, 5, 59, 30, 2, - 216, 217, 7, 87, 2, 2, 217, 218, 5, 67, 34, 2, 218, 219, 5, 67, 34, 2, - 219, 220, 5, 67, 34, 2, 220, 221, 5, 67, 34, 2, 221, 222, 5, 67, 34, 2, - 222, 223, 5, 67, 34, 2, 223, 224, 5, 67, 34, 2, 224, 225, 5, 67, 34, 2, - 225, 227, 3, 2, 2, 2, 226, 208, 3, 2, 2, 2, 226, 215, 3, 2, 2, 2, 227, - 80, 3, 2, 2, 2, 228, 230, 
9, 9, 2, 2, 229, 228, 3, 2, 2, 2, 230, 231, 3, - 2, 2, 2, 231, 229, 3, 2, 2, 2, 231, 232, 3, 2, 2, 2, 232, 233, 3, 2, 2, - 2, 233, 234, 8, 41, 2, 2, 234, 82, 3, 2, 2, 2, 235, 236, 7, 49, 2, 2, 236, - 237, 7, 49, 2, 2, 237, 241, 3, 2, 2, 2, 238, 240, 10, 10, 2, 2, 239, 238, - 3, 2, 2, 2, 240, 243, 3, 2, 2, 2, 241, 239, 3, 2, 2, 2, 241, 242, 3, 2, - 2, 2, 242, 244, 3, 2, 2, 2, 243, 241, 3, 2, 2, 2, 244, 245, 8, 42, 2, 2, - 245, 84, 3, 2, 2, 2, 246, 248, 5, 63, 32, 2, 247, 246, 3, 2, 2, 2, 248, - 249, 3, 2, 2, 2, 249, 247, 3, 2, 2, 2, 249, 250, 3, 2, 2, 2, 250, 251, - 3, 2, 2, 2, 251, 253, 7, 48, 2, 2, 252, 254, 5, 63, 32, 2, 253, 252, 3, - 2, 2, 2, 254, 255, 3, 2, 2, 2, 255, 253, 3, 2, 2, 2, 255, 256, 3, 2, 2, - 2, 256, 258, 3, 2, 2, 2, 257, 259, 5, 65, 33, 2, 258, 257, 3, 2, 2, 2, - 258, 259, 3, 2, 2, 2, 259, 277, 3, 2, 2, 2, 260, 262, 5, 63, 32, 2, 261, - 260, 3, 2, 2, 2, 262, 263, 3, 2, 2, 2, 263, 261, 3, 2, 2, 2, 263, 264, - 3, 2, 2, 2, 264, 265, 3, 2, 2, 2, 265, 266, 5, 65, 33, 2, 266, 277, 3, - 2, 2, 2, 267, 269, 7, 48, 2, 2, 268, 270, 5, 63, 32, 2, 269, 268, 3, 2, - 2, 2, 270, 271, 3, 2, 2, 2, 271, 269, 3, 2, 2, 2, 271, 272, 3, 2, 2, 2, - 272, 274, 3, 2, 2, 2, 273, 275, 5, 65, 33, 2, 274, 273, 3, 2, 2, 2, 274, - 275, 3, 2, 2, 2, 275, 277, 3, 2, 2, 2, 276, 247, 3, 2, 2, 2, 276, 261, - 3, 2, 2, 2, 276, 267, 3, 2, 2, 2, 277, 86, 3, 2, 2, 2, 278, 280, 5, 63, - 32, 2, 279, 278, 3, 2, 2, 2, 280, 281, 3, 2, 2, 2, 281, 279, 3, 2, 2, 2, - 281, 282, 3, 2, 2, 2, 282, 292, 3, 2, 2, 2, 283, 284, 7, 50, 2, 2, 284, - 285, 7, 122, 2, 2, 285, 287, 3, 2, 2, 2, 286, 288, 5, 67, 34, 2, 287, 286, - 3, 2, 2, 2, 288, 289, 3, 2, 2, 2, 289, 287, 3, 2, 2, 2, 289, 290, 3, 2, - 2, 2, 290, 292, 3, 2, 2, 2, 291, 279, 3, 2, 2, 2, 291, 283, 3, 2, 2, 2, - 292, 88, 3, 2, 2, 2, 293, 295, 5, 63, 32, 2, 294, 293, 3, 2, 2, 2, 295, - 296, 3, 2, 2, 2, 296, 294, 3, 2, 2, 2, 296, 297, 3, 2, 2, 2, 297, 298, - 3, 2, 2, 2, 298, 299, 9, 11, 2, 2, 299, 311, 3, 2, 2, 2, 300, 301, 7, 50, - 2, 2, 301, 302, 7, 122, 2, 2, 302, 304, 3, 2, 2, 2, 303, 305, 5, 67, 34, - 2, 304, 303, 3, 2, 2, 2, 305, 306, 3, 2, 2, 2, 306, 304, 3, 2, 2, 2, 306, - 307, 3, 2, 2, 2, 307, 308, 3, 2, 2, 2, 308, 309, 9, 11, 2, 2, 309, 311, - 3, 2, 2, 2, 310, 294, 3, 2, 2, 2, 310, 300, 3, 2, 2, 2, 311, 90, 3, 2, - 2, 2, 312, 317, 7, 36, 2, 2, 313, 316, 5, 71, 36, 2, 314, 316, 10, 12, - 2, 2, 315, 313, 3, 2, 2, 2, 315, 314, 3, 2, 2, 2, 316, 319, 3, 2, 2, 2, - 317, 315, 3, 2, 2, 2, 317, 318, 3, 2, 2, 2, 318, 320, 3, 2, 2, 2, 319, - 317, 3, 2, 2, 2, 320, 409, 7, 36, 2, 2, 321, 326, 7, 41, 2, 2, 322, 325, - 5, 71, 36, 2, 323, 325, 10, 13, 2, 2, 324, 322, 3, 2, 2, 2, 324, 323, 3, - 2, 2, 2, 325, 328, 3, 2, 2, 2, 326, 324, 3, 2, 2, 2, 326, 327, 3, 2, 2, - 2, 327, 329, 3, 2, 2, 2, 328, 326, 3, 2, 2, 2, 329, 409, 7, 41, 2, 2, 330, - 331, 7, 36, 2, 2, 331, 332, 7, 36, 2, 2, 332, 333, 7, 36, 2, 2, 333, 338, - 3, 2, 2, 2, 334, 337, 5, 71, 36, 2, 335, 337, 10, 14, 2, 2, 336, 334, 3, - 2, 2, 2, 336, 335, 3, 2, 2, 2, 337, 340, 3, 2, 2, 2, 338, 339, 3, 2, 2, - 2, 338, 336, 3, 2, 2, 2, 339, 341, 3, 2, 2, 2, 340, 338, 3, 2, 2, 2, 341, - 342, 7, 36, 2, 2, 342, 343, 7, 36, 2, 2, 343, 409, 7, 36, 2, 2, 344, 345, - 7, 41, 2, 2, 345, 346, 7, 41, 2, 2, 346, 347, 7, 41, 2, 2, 347, 352, 3, - 2, 2, 2, 348, 351, 5, 71, 36, 2, 349, 351, 10, 14, 2, 2, 350, 348, 3, 2, - 2, 2, 350, 349, 3, 2, 2, 2, 351, 354, 3, 2, 2, 2, 352, 353, 3, 2, 2, 2, - 352, 350, 3, 2, 2, 2, 353, 355, 3, 2, 2, 2, 354, 352, 3, 2, 2, 2, 355, - 356, 7, 41, 2, 2, 356, 357, 7, 41, 2, 2, 357, 409, 7, 41, 2, 2, 358, 359, - 5, 
69, 35, 2, 359, 363, 7, 36, 2, 2, 360, 362, 10, 15, 2, 2, 361, 360, - 3, 2, 2, 2, 362, 365, 3, 2, 2, 2, 363, 361, 3, 2, 2, 2, 363, 364, 3, 2, - 2, 2, 364, 366, 3, 2, 2, 2, 365, 363, 3, 2, 2, 2, 366, 367, 7, 36, 2, 2, - 367, 409, 3, 2, 2, 2, 368, 369, 5, 69, 35, 2, 369, 373, 7, 41, 2, 2, 370, - 372, 10, 16, 2, 2, 371, 370, 3, 2, 2, 2, 372, 375, 3, 2, 2, 2, 373, 371, - 3, 2, 2, 2, 373, 374, 3, 2, 2, 2, 374, 376, 3, 2, 2, 2, 375, 373, 3, 2, - 2, 2, 376, 377, 7, 41, 2, 2, 377, 409, 3, 2, 2, 2, 378, 379, 5, 69, 35, - 2, 379, 380, 7, 36, 2, 2, 380, 381, 7, 36, 2, 2, 381, 382, 7, 36, 2, 2, - 382, 386, 3, 2, 2, 2, 383, 385, 11, 2, 2, 2, 384, 383, 3, 2, 2, 2, 385, - 388, 3, 2, 2, 2, 386, 387, 3, 2, 2, 2, 386, 384, 3, 2, 2, 2, 387, 389, - 3, 2, 2, 2, 388, 386, 3, 2, 2, 2, 389, 390, 7, 36, 2, 2, 390, 391, 7, 36, - 2, 2, 391, 392, 7, 36, 2, 2, 392, 409, 3, 2, 2, 2, 393, 394, 5, 69, 35, - 2, 394, 395, 7, 41, 2, 2, 395, 396, 7, 41, 2, 2, 396, 397, 7, 41, 2, 2, - 397, 401, 3, 2, 2, 2, 398, 400, 11, 2, 2, 2, 399, 398, 3, 2, 2, 2, 400, - 403, 3, 2, 2, 2, 401, 402, 3, 2, 2, 2, 401, 399, 3, 2, 2, 2, 402, 404, - 3, 2, 2, 2, 403, 401, 3, 2, 2, 2, 404, 405, 7, 41, 2, 2, 405, 406, 7, 41, - 2, 2, 406, 407, 7, 41, 2, 2, 407, 409, 3, 2, 2, 2, 408, 312, 3, 2, 2, 2, - 408, 321, 3, 2, 2, 2, 408, 330, 3, 2, 2, 2, 408, 344, 3, 2, 2, 2, 408, - 358, 3, 2, 2, 2, 408, 368, 3, 2, 2, 2, 408, 378, 3, 2, 2, 2, 408, 393, - 3, 2, 2, 2, 409, 92, 3, 2, 2, 2, 410, 411, 9, 17, 2, 2, 411, 412, 5, 91, - 46, 2, 412, 94, 3, 2, 2, 2, 413, 416, 5, 61, 31, 2, 414, 416, 7, 97, 2, - 2, 415, 413, 3, 2, 2, 2, 415, 414, 3, 2, 2, 2, 416, 422, 3, 2, 2, 2, 417, - 421, 5, 61, 31, 2, 418, 421, 5, 63, 32, 2, 419, 421, 7, 97, 2, 2, 420, - 417, 3, 2, 2, 2, 420, 418, 3, 2, 2, 2, 420, 419, 3, 2, 2, 2, 421, 424, - 3, 2, 2, 2, 422, 420, 3, 2, 2, 2, 422, 423, 3, 2, 2, 2, 423, 96, 3, 2, - 2, 2, 424, 422, 3, 2, 2, 2, 38, 2, 178, 183, 193, 226, 231, 241, 249, 255, - 258, 263, 271, 274, 276, 281, 289, 291, 296, 306, 310, 315, 317, 324, 326, - 336, 338, 350, 352, 363, 373, 386, 401, 408, 415, 420, 422, 3, 2, 3, 2, -} - -var lexerChannelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", -} - -var lexerModeNames = []string{ - "DEFAULT_MODE", -} - -var lexerLiteralNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", "'?'", - "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", -} - -var lexerSymbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "TRUE", "FALSE", "NULL", "WHITESPACE", - "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", "BYTES", "IDENTIFIER", -} - -var lexerRuleNames = []string{ - "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "TRUE", "FALSE", "NULL", "BACKSLASH", - "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", "ESC_SEQ", "ESC_CHAR_SEQ", - "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", "WHITESPACE", "COMMENT", - "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", "BYTES", "IDENTIFIER", -} - -type CELLexer struct { - *antlr.BaseLexer - 
channelNames []string - modeNames []string - // TODO: EOF string -} - -func NewCELLexer(input antlr.CharStream) *CELLexer { - - l := new(CELLexer) - lexerDeserializer := antlr.NewATNDeserializer(nil) - lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) - lexerDecisionToDFA := make([]*antlr.DFA, len(lexerAtn.DecisionToState)) - for index, ds := range lexerAtn.DecisionToState { - lexerDecisionToDFA[index] = antlr.NewDFA(ds, index) - } - - l.BaseLexer = antlr.NewBaseLexer(input) - l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache()) - - l.channelNames = lexerChannelNames - l.modeNames = lexerModeNames - l.RuleNames = lexerRuleNames - l.LiteralNames = lexerLiteralNames - l.SymbolicNames = lexerSymbolicNames - l.GrammarFileName = "CEL.g4" - // TODO: l.EOF = antlr.TokenEOF - - return l -} - -// CELLexer tokens. -const ( - CELLexerEQUALS = 1 - CELLexerNOT_EQUALS = 2 - CELLexerIN = 3 - CELLexerLESS = 4 - CELLexerLESS_EQUALS = 5 - CELLexerGREATER_EQUALS = 6 - CELLexerGREATER = 7 - CELLexerLOGICAL_AND = 8 - CELLexerLOGICAL_OR = 9 - CELLexerLBRACKET = 10 - CELLexerRPRACKET = 11 - CELLexerLBRACE = 12 - CELLexerRBRACE = 13 - CELLexerLPAREN = 14 - CELLexerRPAREN = 15 - CELLexerDOT = 16 - CELLexerCOMMA = 17 - CELLexerMINUS = 18 - CELLexerEXCLAM = 19 - CELLexerQUESTIONMARK = 20 - CELLexerCOLON = 21 - CELLexerPLUS = 22 - CELLexerSTAR = 23 - CELLexerSLASH = 24 - CELLexerPERCENT = 25 - CELLexerTRUE = 26 - CELLexerFALSE = 27 - CELLexerNULL = 28 - CELLexerWHITESPACE = 29 - CELLexerCOMMENT = 30 - CELLexerNUM_FLOAT = 31 - CELLexerNUM_INT = 32 - CELLexerNUM_UINT = 33 - CELLexerSTRING = 34 - CELLexerBYTES = 35 - CELLexerIDENTIFIER = 36 -) diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go deleted file mode 100644 index 9abfc11219..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go +++ /dev/null @@ -1,183 +0,0 @@ -// Generated from /Users/tswadell/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7. - -package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr" - -// CELListener is a complete listener for a parse tree produced by CELParser. -type CELListener interface { - antlr.ParseTreeListener - - // EnterStart is called when entering the start production. - EnterStart(c *StartContext) - - // EnterExpr is called when entering the expr production. - EnterExpr(c *ExprContext) - - // EnterConditionalOr is called when entering the conditionalOr production. - EnterConditionalOr(c *ConditionalOrContext) - - // EnterConditionalAnd is called when entering the conditionalAnd production. - EnterConditionalAnd(c *ConditionalAndContext) - - // EnterRelation is called when entering the relation production. - EnterRelation(c *RelationContext) - - // EnterCalc is called when entering the calc production. - EnterCalc(c *CalcContext) - - // EnterMemberExpr is called when entering the MemberExpr production. - EnterMemberExpr(c *MemberExprContext) - - // EnterLogicalNot is called when entering the LogicalNot production. - EnterLogicalNot(c *LogicalNotContext) - - // EnterNegate is called when entering the Negate production. - EnterNegate(c *NegateContext) - - // EnterSelectOrCall is called when entering the SelectOrCall production. - EnterSelectOrCall(c *SelectOrCallContext) - - // EnterPrimaryExpr is called when entering the PrimaryExpr production. 
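Assuming the vendored gen package is importable at github.com/google/cel-go/parser/gen, the generated lexer can be driven directly to inspect how an expression tokenizes; this is only a usage sketch, and note that hidden-channel tokens (WHITESPACE, COMMENT) still appear here because channel filtering happens in the token stream, not in the lexer itself.

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
	"github.com/google/cel-go/parser/gen"
)

func main() {
	// Tokenize a small CEL expression and print each token's symbolic name
	// and text until EOF. SymbolicNames is populated by NewCELLexer above.
	lexer := gen.NewCELLexer(antlr.NewInputStream(`a.b == 'hi' && size(xs) > 0`))
	for {
		tok := lexer.NextToken()
		if tok.GetTokenType() == antlr.TokenEOF {
			break
		}
		fmt.Printf("%-12s %q\n", lexer.SymbolicNames[tok.GetTokenType()], tok.GetText())
	}
}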
- EnterPrimaryExpr(c *PrimaryExprContext) - - // EnterIndex is called when entering the Index production. - EnterIndex(c *IndexContext) - - // EnterCreateMessage is called when entering the CreateMessage production. - EnterCreateMessage(c *CreateMessageContext) - - // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production. - EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext) - - // EnterNested is called when entering the Nested production. - EnterNested(c *NestedContext) - - // EnterCreateList is called when entering the CreateList production. - EnterCreateList(c *CreateListContext) - - // EnterCreateStruct is called when entering the CreateStruct production. - EnterCreateStruct(c *CreateStructContext) - - // EnterConstantLiteral is called when entering the ConstantLiteral production. - EnterConstantLiteral(c *ConstantLiteralContext) - - // EnterExprList is called when entering the exprList production. - EnterExprList(c *ExprListContext) - - // EnterFieldInitializerList is called when entering the fieldInitializerList production. - EnterFieldInitializerList(c *FieldInitializerListContext) - - // EnterMapInitializerList is called when entering the mapInitializerList production. - EnterMapInitializerList(c *MapInitializerListContext) - - // EnterInt is called when entering the Int production. - EnterInt(c *IntContext) - - // EnterUint is called when entering the Uint production. - EnterUint(c *UintContext) - - // EnterDouble is called when entering the Double production. - EnterDouble(c *DoubleContext) - - // EnterString is called when entering the String production. - EnterString(c *StringContext) - - // EnterBytes is called when entering the Bytes production. - EnterBytes(c *BytesContext) - - // EnterBoolTrue is called when entering the BoolTrue production. - EnterBoolTrue(c *BoolTrueContext) - - // EnterBoolFalse is called when entering the BoolFalse production. - EnterBoolFalse(c *BoolFalseContext) - - // EnterNull is called when entering the Null production. - EnterNull(c *NullContext) - - // ExitStart is called when exiting the start production. - ExitStart(c *StartContext) - - // ExitExpr is called when exiting the expr production. - ExitExpr(c *ExprContext) - - // ExitConditionalOr is called when exiting the conditionalOr production. - ExitConditionalOr(c *ConditionalOrContext) - - // ExitConditionalAnd is called when exiting the conditionalAnd production. - ExitConditionalAnd(c *ConditionalAndContext) - - // ExitRelation is called when exiting the relation production. - ExitRelation(c *RelationContext) - - // ExitCalc is called when exiting the calc production. - ExitCalc(c *CalcContext) - - // ExitMemberExpr is called when exiting the MemberExpr production. - ExitMemberExpr(c *MemberExprContext) - - // ExitLogicalNot is called when exiting the LogicalNot production. - ExitLogicalNot(c *LogicalNotContext) - - // ExitNegate is called when exiting the Negate production. - ExitNegate(c *NegateContext) - - // ExitSelectOrCall is called when exiting the SelectOrCall production. - ExitSelectOrCall(c *SelectOrCallContext) - - // ExitPrimaryExpr is called when exiting the PrimaryExpr production. - ExitPrimaryExpr(c *PrimaryExprContext) - - // ExitIndex is called when exiting the Index production. - ExitIndex(c *IndexContext) - - // ExitCreateMessage is called when exiting the CreateMessage production. - ExitCreateMessage(c *CreateMessageContext) - - // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production. 
- ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext) - - // ExitNested is called when exiting the Nested production. - ExitNested(c *NestedContext) - - // ExitCreateList is called when exiting the CreateList production. - ExitCreateList(c *CreateListContext) - - // ExitCreateStruct is called when exiting the CreateStruct production. - ExitCreateStruct(c *CreateStructContext) - - // ExitConstantLiteral is called when exiting the ConstantLiteral production. - ExitConstantLiteral(c *ConstantLiteralContext) - - // ExitExprList is called when exiting the exprList production. - ExitExprList(c *ExprListContext) - - // ExitFieldInitializerList is called when exiting the fieldInitializerList production. - ExitFieldInitializerList(c *FieldInitializerListContext) - - // ExitMapInitializerList is called when exiting the mapInitializerList production. - ExitMapInitializerList(c *MapInitializerListContext) - - // ExitInt is called when exiting the Int production. - ExitInt(c *IntContext) - - // ExitUint is called when exiting the Uint production. - ExitUint(c *UintContext) - - // ExitDouble is called when exiting the Double production. - ExitDouble(c *DoubleContext) - - // ExitString is called when exiting the String production. - ExitString(c *StringContext) - - // ExitBytes is called when exiting the Bytes production. - ExitBytes(c *BytesContext) - - // ExitBoolTrue is called when exiting the BoolTrue production. - ExitBoolTrue(c *BoolTrueContext) - - // ExitBoolFalse is called when exiting the BoolFalse production. - ExitBoolFalse(c *BoolFalseContext) - - // ExitNull is called when exiting the Null production. - ExitNull(c *NullContext) -} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go deleted file mode 100644 index d99ed1c6a9..0000000000 --- a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go +++ /dev/null @@ -1,4097 +0,0 @@ -// Generated from /Users/tswadell/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7. 
- -package gen // CEL -import ( - "fmt" - "reflect" - "strconv" - - "github.com/antlr/antlr4/runtime/Go/antlr" -) - -// Suppress unused import errors -var _ = fmt.Printf -var _ = reflect.Copy -var _ = strconv.Itoa - -var parserATN = []uint16{ - 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 38, 211, - 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, - 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, - 9, 13, 4, 14, 9, 14, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 5, 3, 38, 10, 3, 3, 4, 3, 4, 3, 4, 7, 4, 43, 10, 4, 12, 4, 14, 4, 46, - 11, 4, 3, 5, 3, 5, 3, 5, 7, 5, 51, 10, 5, 12, 5, 14, 5, 54, 11, 5, 3, 6, - 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 7, 6, 62, 10, 6, 12, 6, 14, 6, 65, 11, 6, - 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 7, 7, 76, 10, 7, - 12, 7, 14, 7, 79, 11, 7, 3, 8, 3, 8, 6, 8, 83, 10, 8, 13, 8, 14, 8, 84, - 3, 8, 3, 8, 6, 8, 89, 10, 8, 13, 8, 14, 8, 90, 3, 8, 5, 8, 94, 10, 8, 3, - 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 104, 10, 9, 3, 9, 5, - 9, 107, 10, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 117, - 10, 9, 3, 9, 5, 9, 120, 10, 9, 3, 9, 7, 9, 123, 10, 9, 12, 9, 14, 9, 126, - 11, 9, 3, 10, 5, 10, 129, 10, 10, 3, 10, 3, 10, 3, 10, 5, 10, 134, 10, - 10, 3, 10, 5, 10, 137, 10, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, - 5, 10, 145, 10, 10, 3, 10, 5, 10, 148, 10, 10, 3, 10, 3, 10, 3, 10, 5, - 10, 153, 10, 10, 3, 10, 5, 10, 156, 10, 10, 3, 10, 3, 10, 5, 10, 160, 10, - 10, 3, 11, 3, 11, 3, 11, 7, 11, 165, 10, 11, 12, 11, 14, 11, 168, 11, 11, - 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 7, 12, 177, 10, 12, 12, - 12, 14, 12, 180, 11, 12, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, 3, 13, - 3, 13, 7, 13, 190, 10, 13, 12, 13, 14, 13, 193, 11, 13, 3, 14, 5, 14, 196, - 10, 14, 3, 14, 3, 14, 3, 14, 5, 14, 201, 10, 14, 3, 14, 3, 14, 3, 14, 3, - 14, 3, 14, 3, 14, 5, 14, 209, 10, 14, 3, 14, 2, 5, 10, 12, 16, 15, 2, 4, - 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 2, 5, 3, 2, 3, 9, 3, 2, 25, 27, - 4, 2, 20, 20, 24, 24, 2, 237, 2, 28, 3, 2, 2, 2, 4, 31, 3, 2, 2, 2, 6, - 39, 3, 2, 2, 2, 8, 47, 3, 2, 2, 2, 10, 55, 3, 2, 2, 2, 12, 66, 3, 2, 2, - 2, 14, 93, 3, 2, 2, 2, 16, 95, 3, 2, 2, 2, 18, 159, 3, 2, 2, 2, 20, 161, - 3, 2, 2, 2, 22, 169, 3, 2, 2, 2, 24, 181, 3, 2, 2, 2, 26, 208, 3, 2, 2, - 2, 28, 29, 5, 4, 3, 2, 29, 30, 7, 2, 2, 3, 30, 3, 3, 2, 2, 2, 31, 37, 5, - 6, 4, 2, 32, 33, 7, 22, 2, 2, 33, 34, 5, 6, 4, 2, 34, 35, 7, 23, 2, 2, - 35, 36, 5, 4, 3, 2, 36, 38, 3, 2, 2, 2, 37, 32, 3, 2, 2, 2, 37, 38, 3, - 2, 2, 2, 38, 5, 3, 2, 2, 2, 39, 44, 5, 8, 5, 2, 40, 41, 7, 11, 2, 2, 41, - 43, 5, 8, 5, 2, 42, 40, 3, 2, 2, 2, 43, 46, 3, 2, 2, 2, 44, 42, 3, 2, 2, - 2, 44, 45, 3, 2, 2, 2, 45, 7, 3, 2, 2, 2, 46, 44, 3, 2, 2, 2, 47, 52, 5, - 10, 6, 2, 48, 49, 7, 10, 2, 2, 49, 51, 5, 10, 6, 2, 50, 48, 3, 2, 2, 2, - 51, 54, 3, 2, 2, 2, 52, 50, 3, 2, 2, 2, 52, 53, 3, 2, 2, 2, 53, 9, 3, 2, - 2, 2, 54, 52, 3, 2, 2, 2, 55, 56, 8, 6, 1, 2, 56, 57, 5, 12, 7, 2, 57, - 63, 3, 2, 2, 2, 58, 59, 12, 3, 2, 2, 59, 60, 9, 2, 2, 2, 60, 62, 5, 10, - 6, 4, 61, 58, 3, 2, 2, 2, 62, 65, 3, 2, 2, 2, 63, 61, 3, 2, 2, 2, 63, 64, - 3, 2, 2, 2, 64, 11, 3, 2, 2, 2, 65, 63, 3, 2, 2, 2, 66, 67, 8, 7, 1, 2, - 67, 68, 5, 14, 8, 2, 68, 77, 3, 2, 2, 2, 69, 70, 12, 4, 2, 2, 70, 71, 9, - 3, 2, 2, 71, 76, 5, 12, 7, 5, 72, 73, 12, 3, 2, 2, 73, 74, 9, 4, 2, 2, - 74, 76, 5, 12, 7, 4, 75, 69, 3, 2, 2, 2, 75, 72, 3, 2, 2, 2, 76, 79, 3, - 2, 2, 2, 77, 75, 3, 2, 2, 2, 77, 78, 3, 2, 2, 2, 78, 13, 3, 2, 2, 2, 79, - 77, 3, 2, 2, 2, 80, 94, 5, 
16, 9, 2, 81, 83, 7, 21, 2, 2, 82, 81, 3, 2, - 2, 2, 83, 84, 3, 2, 2, 2, 84, 82, 3, 2, 2, 2, 84, 85, 3, 2, 2, 2, 85, 86, - 3, 2, 2, 2, 86, 94, 5, 16, 9, 2, 87, 89, 7, 20, 2, 2, 88, 87, 3, 2, 2, - 2, 89, 90, 3, 2, 2, 2, 90, 88, 3, 2, 2, 2, 90, 91, 3, 2, 2, 2, 91, 92, - 3, 2, 2, 2, 92, 94, 5, 16, 9, 2, 93, 80, 3, 2, 2, 2, 93, 82, 3, 2, 2, 2, - 93, 88, 3, 2, 2, 2, 94, 15, 3, 2, 2, 2, 95, 96, 8, 9, 1, 2, 96, 97, 5, - 18, 10, 2, 97, 124, 3, 2, 2, 2, 98, 99, 12, 5, 2, 2, 99, 100, 7, 18, 2, - 2, 100, 106, 7, 38, 2, 2, 101, 103, 7, 16, 2, 2, 102, 104, 5, 20, 11, 2, - 103, 102, 3, 2, 2, 2, 103, 104, 3, 2, 2, 2, 104, 105, 3, 2, 2, 2, 105, - 107, 7, 17, 2, 2, 106, 101, 3, 2, 2, 2, 106, 107, 3, 2, 2, 2, 107, 123, - 3, 2, 2, 2, 108, 109, 12, 4, 2, 2, 109, 110, 7, 12, 2, 2, 110, 111, 5, - 4, 3, 2, 111, 112, 7, 13, 2, 2, 112, 123, 3, 2, 2, 2, 113, 114, 12, 3, - 2, 2, 114, 116, 7, 14, 2, 2, 115, 117, 5, 22, 12, 2, 116, 115, 3, 2, 2, - 2, 116, 117, 3, 2, 2, 2, 117, 119, 3, 2, 2, 2, 118, 120, 7, 19, 2, 2, 119, - 118, 3, 2, 2, 2, 119, 120, 3, 2, 2, 2, 120, 121, 3, 2, 2, 2, 121, 123, - 7, 15, 2, 2, 122, 98, 3, 2, 2, 2, 122, 108, 3, 2, 2, 2, 122, 113, 3, 2, - 2, 2, 123, 126, 3, 2, 2, 2, 124, 122, 3, 2, 2, 2, 124, 125, 3, 2, 2, 2, - 125, 17, 3, 2, 2, 2, 126, 124, 3, 2, 2, 2, 127, 129, 7, 18, 2, 2, 128, - 127, 3, 2, 2, 2, 128, 129, 3, 2, 2, 2, 129, 130, 3, 2, 2, 2, 130, 136, - 7, 38, 2, 2, 131, 133, 7, 16, 2, 2, 132, 134, 5, 20, 11, 2, 133, 132, 3, - 2, 2, 2, 133, 134, 3, 2, 2, 2, 134, 135, 3, 2, 2, 2, 135, 137, 7, 17, 2, - 2, 136, 131, 3, 2, 2, 2, 136, 137, 3, 2, 2, 2, 137, 160, 3, 2, 2, 2, 138, - 139, 7, 16, 2, 2, 139, 140, 5, 4, 3, 2, 140, 141, 7, 17, 2, 2, 141, 160, - 3, 2, 2, 2, 142, 144, 7, 12, 2, 2, 143, 145, 5, 20, 11, 2, 144, 143, 3, - 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 3, 2, 2, 2, 146, 148, 7, 19, 2, - 2, 147, 146, 3, 2, 2, 2, 147, 148, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, - 160, 7, 13, 2, 2, 150, 152, 7, 14, 2, 2, 151, 153, 5, 24, 13, 2, 152, 151, - 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 7, 19, - 2, 2, 155, 154, 3, 2, 2, 2, 155, 156, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, - 157, 160, 7, 15, 2, 2, 158, 160, 5, 26, 14, 2, 159, 128, 3, 2, 2, 2, 159, - 138, 3, 2, 2, 2, 159, 142, 3, 2, 2, 2, 159, 150, 3, 2, 2, 2, 159, 158, - 3, 2, 2, 2, 160, 19, 3, 2, 2, 2, 161, 166, 5, 4, 3, 2, 162, 163, 7, 19, - 2, 2, 163, 165, 5, 4, 3, 2, 164, 162, 3, 2, 2, 2, 165, 168, 3, 2, 2, 2, - 166, 164, 3, 2, 2, 2, 166, 167, 3, 2, 2, 2, 167, 21, 3, 2, 2, 2, 168, 166, - 3, 2, 2, 2, 169, 170, 7, 38, 2, 2, 170, 171, 7, 23, 2, 2, 171, 178, 5, - 4, 3, 2, 172, 173, 7, 19, 2, 2, 173, 174, 7, 38, 2, 2, 174, 175, 7, 23, - 2, 2, 175, 177, 5, 4, 3, 2, 176, 172, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, - 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 23, 3, 2, 2, 2, 180, 178, - 3, 2, 2, 2, 181, 182, 5, 4, 3, 2, 182, 183, 7, 23, 2, 2, 183, 191, 5, 4, - 3, 2, 184, 185, 7, 19, 2, 2, 185, 186, 5, 4, 3, 2, 186, 187, 7, 23, 2, - 2, 187, 188, 5, 4, 3, 2, 188, 190, 3, 2, 2, 2, 189, 184, 3, 2, 2, 2, 190, - 193, 3, 2, 2, 2, 191, 189, 3, 2, 2, 2, 191, 192, 3, 2, 2, 2, 192, 25, 3, - 2, 2, 2, 193, 191, 3, 2, 2, 2, 194, 196, 7, 20, 2, 2, 195, 194, 3, 2, 2, - 2, 195, 196, 3, 2, 2, 2, 196, 197, 3, 2, 2, 2, 197, 209, 7, 34, 2, 2, 198, - 209, 7, 35, 2, 2, 199, 201, 7, 20, 2, 2, 200, 199, 3, 2, 2, 2, 200, 201, - 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 209, 7, 33, 2, 2, 203, 209, 7, 36, - 2, 2, 204, 209, 7, 37, 2, 2, 205, 209, 7, 28, 2, 2, 206, 209, 7, 29, 2, - 2, 207, 209, 7, 30, 2, 2, 208, 195, 3, 2, 2, 2, 208, 198, 3, 2, 2, 2, 208, - 
200, 3, 2, 2, 2, 208, 203, 3, 2, 2, 2, 208, 204, 3, 2, 2, 2, 208, 205, - 3, 2, 2, 2, 208, 206, 3, 2, 2, 2, 208, 207, 3, 2, 2, 2, 209, 27, 3, 2, - 2, 2, 31, 37, 44, 52, 63, 75, 77, 84, 90, 93, 103, 106, 116, 119, 122, - 124, 128, 133, 136, 144, 147, 152, 155, 159, 166, 178, 191, 195, 200, 208, -} - -var literalNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", "'?'", - "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", -} -var symbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "TRUE", "FALSE", "NULL", "WHITESPACE", - "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", "BYTES", "IDENTIFIER", -} - -var ruleNames = []string{ - "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", - "unary", "member", "primary", "exprList", "fieldInitializerList", "mapInitializerList", - "literal", -} - -type CELParser struct { - *antlr.BaseParser -} - -func NewCELParser(input antlr.TokenStream) *CELParser { - this := new(CELParser) - deserializer := antlr.NewATNDeserializer(nil) - deserializedATN := deserializer.DeserializeFromUInt16(parserATN) - decisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState)) - for index, ds := range deserializedATN.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(ds, index) - } - - this.BaseParser = antlr.NewBaseParser(input) - - this.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache()) - this.RuleNames = ruleNames - this.LiteralNames = literalNames - this.SymbolicNames = symbolicNames - this.GrammarFileName = "CEL.g4" - - return this -} - -// CELParser tokens. -const ( - CELParserEOF = antlr.TokenEOF - CELParserEQUALS = 1 - CELParserNOT_EQUALS = 2 - CELParserIN = 3 - CELParserLESS = 4 - CELParserLESS_EQUALS = 5 - CELParserGREATER_EQUALS = 6 - CELParserGREATER = 7 - CELParserLOGICAL_AND = 8 - CELParserLOGICAL_OR = 9 - CELParserLBRACKET = 10 - CELParserRPRACKET = 11 - CELParserLBRACE = 12 - CELParserRBRACE = 13 - CELParserLPAREN = 14 - CELParserRPAREN = 15 - CELParserDOT = 16 - CELParserCOMMA = 17 - CELParserMINUS = 18 - CELParserEXCLAM = 19 - CELParserQUESTIONMARK = 20 - CELParserCOLON = 21 - CELParserPLUS = 22 - CELParserSTAR = 23 - CELParserSLASH = 24 - CELParserPERCENT = 25 - CELParserTRUE = 26 - CELParserFALSE = 27 - CELParserNULL = 28 - CELParserWHITESPACE = 29 - CELParserCOMMENT = 30 - CELParserNUM_FLOAT = 31 - CELParserNUM_INT = 32 - CELParserNUM_UINT = 33 - CELParserSTRING = 34 - CELParserBYTES = 35 - CELParserIDENTIFIER = 36 -) - -// CELParser rules. -const ( - CELParserRULE_start = 0 - CELParserRULE_expr = 1 - CELParserRULE_conditionalOr = 2 - CELParserRULE_conditionalAnd = 3 - CELParserRULE_relation = 4 - CELParserRULE_calc = 5 - CELParserRULE_unary = 6 - CELParserRULE_member = 7 - CELParserRULE_primary = 8 - CELParserRULE_exprList = 9 - CELParserRULE_fieldInitializerList = 10 - CELParserRULE_mapInitializerList = 11 - CELParserRULE_literal = 12 -) - -// IStartContext is an interface to support dynamic dispatch. -type IStartContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetE returns the e rule contexts. 
- GetE() IExprContext - - // SetE sets the e rule contexts. - SetE(IExprContext) - - // IsStartContext differentiates from other interfaces. - IsStartContext() -} - -type StartContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IExprContext -} - -func NewEmptyStartContext() *StartContext { - var p = new(StartContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = CELParserRULE_start - return p -} - -func (*StartContext) IsStartContext() {} - -func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext { - var p = new(StartContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = CELParserRULE_start - - return p -} - -func (s *StartContext) GetParser() antlr.Parser { return s.parser } - -func (s *StartContext) GetE() IExprContext { return s.e } - -func (s *StartContext) SetE(v IExprContext) { s.e = v } - -func (s *StartContext) EOF() antlr.TerminalNode { - return s.GetToken(CELParserEOF, 0) -} - -func (s *StartContext) Expr() IExprContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IExprContext)(nil)).Elem(), 0) - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *StartContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.EnterStart(s) - } -} - -func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.ExitStart(s) - } -} - -func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { - switch t := visitor.(type) { - case CELVisitor: - return t.VisitStart(s) - - default: - return t.VisitChildren(s) - } -} - -func (p *CELParser) Start() (localctx IStartContext) { - localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 0, CELParserRULE_start) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(26) - - var _x = p.Expr() - - localctx.(*StartContext).e = _x - } - { - p.SetState(27) - p.Match(CELParserEOF) - } - - return localctx -} - -// IExprContext is an interface to support dynamic dispatch. -type IExprContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetOp returns the op token. - GetOp() antlr.Token - - // SetOp sets the op token. - SetOp(antlr.Token) - - // GetE returns the e rule contexts. - GetE() IConditionalOrContext - - // GetE1 returns the e1 rule contexts. - GetE1() IConditionalOrContext - - // GetE2 returns the e2 rule contexts. - GetE2() IExprContext - - // SetE sets the e rule contexts. - SetE(IConditionalOrContext) - - // SetE1 sets the e1 rule contexts. - SetE1(IConditionalOrContext) - - // SetE2 sets the e2 rule contexts. - SetE2(IExprContext) - - // IsExprContext differentiates from other interfaces. 
- IsExprContext() -} - -type ExprContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IConditionalOrContext - op antlr.Token - e1 IConditionalOrContext - e2 IExprContext -} - -func NewEmptyExprContext() *ExprContext { - var p = new(ExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = CELParserRULE_expr - return p -} - -func (*ExprContext) IsExprContext() {} - -func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { - var p = new(ExprContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = CELParserRULE_expr - - return p -} - -func (s *ExprContext) GetParser() antlr.Parser { return s.parser } - -func (s *ExprContext) GetOp() antlr.Token { return s.op } - -func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } - -func (s *ExprContext) GetE() IConditionalOrContext { return s.e } - -func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 } - -func (s *ExprContext) GetE2() IExprContext { return s.e2 } - -func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v } - -func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v } - -func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v } - -func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IConditionalOrContext)(nil)).Elem()) - var tst = make([]IConditionalOrContext, len(ts)) - - for i, t := range ts { - if t != nil { - tst[i] = t.(IConditionalOrContext) - } - } - - return tst -} - -func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IConditionalOrContext)(nil)).Elem(), i) - - if t == nil { - return nil - } - - return t.(IConditionalOrContext) -} - -func (s *ExprContext) Expr() IExprContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IExprContext)(nil)).Elem(), 0) - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *ExprContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.EnterExpr(s) - } -} - -func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.ExitExpr(s) - } -} - -func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { - switch t := visitor.(type) { - case CELVisitor: - return t.VisitExpr(s) - - default: - return t.VisitChildren(s) - } -} - -func (p *CELParser) Expr() (localctx IExprContext) { - localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 2, CELParserRULE_expr) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(29) - - var _x = p.ConditionalOr() - - localctx.(*ExprContext).e = _x - } - p.SetState(35) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == CELParserQUESTIONMARK { - { - p.SetState(30) - - var _m = 
p.Match(CELParserQUESTIONMARK) - - localctx.(*ExprContext).op = _m - } - { - p.SetState(31) - - var _x = p.ConditionalOr() - - localctx.(*ExprContext).e1 = _x - } - { - p.SetState(32) - p.Match(CELParserCOLON) - } - { - p.SetState(33) - - var _x = p.Expr() - - localctx.(*ExprContext).e2 = _x - } - - } - - return localctx -} - -// IConditionalOrContext is an interface to support dynamic dispatch. -type IConditionalOrContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetS9 returns the s9 token. - GetS9() antlr.Token - - // SetS9 sets the s9 token. - SetS9(antlr.Token) - - // GetOps returns the ops token list. - GetOps() []antlr.Token - - // SetOps sets the ops token list. - SetOps([]antlr.Token) - - // GetE returns the e rule contexts. - GetE() IConditionalAndContext - - // Get_conditionalAnd returns the _conditionalAnd rule contexts. - Get_conditionalAnd() IConditionalAndContext - - // SetE sets the e rule contexts. - SetE(IConditionalAndContext) - - // Set_conditionalAnd sets the _conditionalAnd rule contexts. - Set_conditionalAnd(IConditionalAndContext) - - // GetE1 returns the e1 rule context list. - GetE1() []IConditionalAndContext - - // SetE1 sets the e1 rule context list. - SetE1([]IConditionalAndContext) - - // IsConditionalOrContext differentiates from other interfaces. - IsConditionalOrContext() -} - -type ConditionalOrContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IConditionalAndContext - s9 antlr.Token - ops []antlr.Token - _conditionalAnd IConditionalAndContext - e1 []IConditionalAndContext -} - -func NewEmptyConditionalOrContext() *ConditionalOrContext { - var p = new(ConditionalOrContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = CELParserRULE_conditionalOr - return p -} - -func (*ConditionalOrContext) IsConditionalOrContext() {} - -func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext { - var p = new(ConditionalOrContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = CELParserRULE_conditionalOr - - return p -} - -func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser } - -func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 } - -func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v } - -func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops } - -func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v } - -func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e } - -func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd } - -func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v } - -func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v } - -func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 } - -func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v } - -func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IConditionalAndContext)(nil)).Elem()) - var tst = make([]IConditionalAndContext, len(ts)) - - for i, t := range ts { - if t != nil { - tst[i] = t.(IConditionalAndContext) - } - } - - return tst -} - -func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext { - var t = 
s.GetTypedRuleContext(reflect.TypeOf((*IConditionalAndContext)(nil)).Elem(), i) - - if t == nil { - return nil - } - - return t.(IConditionalAndContext) -} - -func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.EnterConditionalOr(s) - } -} - -func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.ExitConditionalOr(s) - } -} - -func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { - switch t := visitor.(type) { - case CELVisitor: - return t.VisitConditionalOr(s) - - default: - return t.VisitChildren(s) - } -} - -func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { - localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 4, CELParserRULE_conditionalOr) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(37) - - var _x = p.ConditionalAnd() - - localctx.(*ConditionalOrContext).e = _x - } - p.SetState(42) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == CELParserLOGICAL_OR { - { - p.SetState(38) - - var _m = p.Match(CELParserLOGICAL_OR) - - localctx.(*ConditionalOrContext).s9 = _m - } - localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9) - { - p.SetState(39) - - var _x = p.ConditionalAnd() - - localctx.(*ConditionalOrContext)._conditionalAnd = _x - } - localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd) - - p.SetState(44) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// IConditionalAndContext is an interface to support dynamic dispatch. -type IConditionalAndContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetS8 returns the s8 token. - GetS8() antlr.Token - - // SetS8 sets the s8 token. - SetS8(antlr.Token) - - // GetOps returns the ops token list. - GetOps() []antlr.Token - - // SetOps sets the ops token list. - SetOps([]antlr.Token) - - // GetE returns the e rule contexts. - GetE() IRelationContext - - // Get_relation returns the _relation rule contexts. - Get_relation() IRelationContext - - // SetE sets the e rule contexts. - SetE(IRelationContext) - - // Set_relation sets the _relation rule contexts. - Set_relation(IRelationContext) - - // GetE1 returns the e1 rule context list. - GetE1() []IRelationContext - - // SetE1 sets the e1 rule context list. - SetE1([]IRelationContext) - - // IsConditionalAndContext differentiates from other interfaces. 
- IsConditionalAndContext() -} - -type ConditionalAndContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IRelationContext - s8 antlr.Token - ops []antlr.Token - _relation IRelationContext - e1 []IRelationContext -} - -func NewEmptyConditionalAndContext() *ConditionalAndContext { - var p = new(ConditionalAndContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = CELParserRULE_conditionalAnd - return p -} - -func (*ConditionalAndContext) IsConditionalAndContext() {} - -func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext { - var p = new(ConditionalAndContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = CELParserRULE_conditionalAnd - - return p -} - -func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser } - -func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 } - -func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v } - -func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops } - -func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v } - -func (s *ConditionalAndContext) GetE() IRelationContext { return s.e } - -func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation } - -func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v } - -func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v } - -func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 } - -func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v } - -func (s *ConditionalAndContext) AllRelation() []IRelationContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IRelationContext)(nil)).Elem()) - var tst = make([]IRelationContext, len(ts)) - - for i, t := range ts { - if t != nil { - tst[i] = t.(IRelationContext) - } - } - - return tst -} - -func (s *ConditionalAndContext) Relation(i int) IRelationContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IRelationContext)(nil)).Elem(), i) - - if t == nil { - return nil - } - - return t.(IRelationContext) -} - -func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.EnterConditionalAnd(s) - } -} - -func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.ExitConditionalAnd(s) - } -} - -func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { - switch t := visitor.(type) { - case CELVisitor: - return t.VisitConditionalAnd(s) - - default: - return t.VisitChildren(s) - } -} - -func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { - localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - 
}() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(45) - - var _x = p.relation(0) - - localctx.(*ConditionalAndContext).e = _x - } - p.SetState(50) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == CELParserLOGICAL_AND { - { - p.SetState(46) - - var _m = p.Match(CELParserLOGICAL_AND) - - localctx.(*ConditionalAndContext).s8 = _m - } - localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8) - { - p.SetState(47) - - var _x = p.relation(0) - - localctx.(*ConditionalAndContext)._relation = _x - } - localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation) - - p.SetState(52) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// IRelationContext is an interface to support dynamic dispatch. -type IRelationContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetOp returns the op token. - GetOp() antlr.Token - - // SetOp sets the op token. - SetOp(antlr.Token) - - // IsRelationContext differentiates from other interfaces. - IsRelationContext() -} - -type RelationContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - op antlr.Token -} - -func NewEmptyRelationContext() *RelationContext { - var p = new(RelationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = CELParserRULE_relation - return p -} - -func (*RelationContext) IsRelationContext() {} - -func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext { - var p = new(RelationContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = CELParserRULE_relation - - return p -} - -func (s *RelationContext) GetParser() antlr.Parser { return s.parser } - -func (s *RelationContext) GetOp() antlr.Token { return s.op } - -func (s *RelationContext) SetOp(v antlr.Token) { s.op = v } - -func (s *RelationContext) Calc() ICalcContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*ICalcContext)(nil)).Elem(), 0) - - if t == nil { - return nil - } - - return t.(ICalcContext) -} - -func (s *RelationContext) AllRelation() []IRelationContext { - var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IRelationContext)(nil)).Elem()) - var tst = make([]IRelationContext, len(ts)) - - for i, t := range ts { - if t != nil { - tst[i] = t.(IRelationContext) - } - } - - return tst -} - -func (s *RelationContext) Relation(i int) IRelationContext { - var t = s.GetTypedRuleContext(reflect.TypeOf((*IRelationContext)(nil)).Elem(), i) - - if t == nil { - return nil - } - - return t.(IRelationContext) -} - -func (s *RelationContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.EnterRelation(s) - } -} - -func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.ExitRelation(s) - } -} - -func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { - switch t := visitor.(type) { - case CELVisitor: - return t.VisitRelation(s) - - default: - return 
t.VisitChildren(s) - } -} - -func (p *CELParser) Relation() (localctx IRelationContext) { - return p.relation(0) -} - -func (p *CELParser) relation(_p int) (localctx IRelationContext) { - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() - _parentState := p.GetState() - localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState) - var _prevctx IRelationContext = localctx - var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. - _startState := 8 - p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p) - var _la int - - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - var _alt int - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(54) - p.calc(0) - } - - p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) - p.SetState(61) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) - - for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { - if _alt == 1 { - if p.GetParseListeners() != nil { - p.TriggerExitRuleEvent() - } - _prevctx = localctx - localctx = NewRelationContext(p, _parentctx, _parentState) - p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation) - p.SetState(56) - - if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) - } - p.SetState(57) - - var _lt = p.GetTokenStream().LT(1) - - localctx.(*RelationContext).op = _lt - - _la = p.GetTokenStream().LA(1) - - if !(((_la)&-(0x1f+1)) == 0 && ((1<::`. - // - // When the macros is a var-arg style macro, the `arg-count` value is represented as a `*`. - MacroKey() string - - // Expander returns the MacroExpander to apply when the macro key matches the parsed call - // signature. - Expander() MacroExpander -} - -// Macro type which declares the function name and arg count expected for the -// macro, as well as a macro expansion function. -type macro struct { - function string - receiverStyle bool - varArgStyle bool - argCount int - expander MacroExpander -} - -// Function returns the macro's function name (i.e. the function whose syntax it mimics). -func (m *macro) Function() string { - return m.function -} - -// ArgCount returns the number of arguments the macro expects. -func (m *macro) ArgCount() int { - return m.argCount -} - -// IsReceiverStyle returns whether the macro is receiver style. -func (m *macro) IsReceiverStyle() bool { - return m.receiverStyle -} - -// Expander implements the Macro interface method. -func (m *macro) Expander() MacroExpander { - return m.expander -} - -// MacroKey implements the Macro interface method. -func (m *macro) MacroKey() string { - if m.varArgStyle { - return makeVarArgMacroKey(m.function, m.receiverStyle) - } - return makeMacroKey(m.function, m.argCount, m.receiverStyle) -} - -func makeMacroKey(name string, args int, receiverStyle bool) string { - return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle) -} - -func makeVarArgMacroKey(name string, receiverStyle bool) string { - return fmt.Sprintf("%s:*:%v", name, receiverStyle) -} - -// MacroExpander converts the target and args of a function call that matches a Macro. 
-// -// Note: when the Macros.IsReceiverStyle() is true, the target argument will be nil. -type MacroExpander func(eh ExprHelper, - target *exprpb.Expr, - args []*exprpb.Expr) (*exprpb.Expr, *common.Error) - -// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is -// consistent with the source position and expression id generation code leveraged by both -// the parser and type-checker. -type ExprHelper interface { - // LiteralBool creates an Expr value for a bool literal. - LiteralBool(value bool) *exprpb.Expr - - // LiteralBytes creates an Expr value for a byte literal. - LiteralBytes(value []byte) *exprpb.Expr - - // LiteralDouble creates an Expr value for double literal. - LiteralDouble(value float64) *exprpb.Expr - - // LiteralInt creates an Expr value for an int literal. - LiteralInt(value int64) *exprpb.Expr - - // LiteralString creates am Expr value for a string literal. - LiteralString(value string) *exprpb.Expr - - // LiteralUint creates an Expr value for a uint literal. - LiteralUint(value uint64) *exprpb.Expr - - // NewList creates a CreateList instruction where the list is comprised of the optional set - // of elements provided as arguments. - NewList(elems ...*exprpb.Expr) *exprpb.Expr - - // NewMap creates a CreateStruct instruction for a map where the map is comprised of the - // optional set of key, value entries. - NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr - - // NewMapEntry creates a Map Entry for the key, value pair. - NewMapEntry(key *exprpb.Expr, val *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry - - // NewObject creates a CreateStruct instruction for an object with a given type name and - // optional set of field initializers. - NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr - - // NewObjectFieldInit creates a new Object field initializer from the field name and value. - NewObjectFieldInit(field string, init *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry - - // Fold creates a fold comprehension instruction. - // - // - iterVar is the iteration variable name. - // - iterRange represents the expression that resolves to a list or map where the elements or - // keys (respectively) will be iterated over. - // - accuVar is the accumulation variable name, typically parser.AccumulatorName. - // - accuInit is the initial expression whose value will be set for the accuVar prior to - // folding. - // - condition is the expression to test to determine whether to continue folding. - // - step is the expression to evaluation at the conclusion of a single fold iteration. - // - result is the computation to evaluate at the conclusion of the fold. - // - // The accuVar should not shadow variable names that you would like to reference within the - // environment in the step and condition expressions. Presently, the name __result__ is commonly - // used by built-in macros but this may change in the future. - Fold(iterVar string, - iterRange *exprpb.Expr, - accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr - - // Ident creates an identifier Expr value. - Ident(name string) *exprpb.Expr - - // GlobalCall creates a function call Expr value for a global (free) function. - GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr - - // ReceiverCall creates a function call Expr value for a receiver-style function. 
- ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr - - // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. - PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr - - // Select create a field traversal Expr value. - Select(operand *exprpb.Expr, field string) *exprpb.Expr - - // OffsetLocation returns the Location of the expression identifier. - OffsetLocation(exprID int64) common.Location -} - -var ( - // AllMacros includes the list of all spec-supported macros. - AllMacros = []Macro{ - // The macro "has(m.f)" which tests the presence of a field, avoiding the need to specify - // the field as a string. - NewGlobalMacro(operators.Has, 1, makeHas), - - // The macro "range.all(var, predicate)", which is true if for all elements in range the - // predicate holds. - NewReceiverMacro(operators.All, 2, makeAll), - - // The macro "range.exists(var, predicate)", which is true if for at least one element in - // range the predicate holds. - NewReceiverMacro(operators.Exists, 2, makeExists), - - // The macro "range.exists_one(var, predicate)", which is true if for exactly one element - // in range the predicate holds. - NewReceiverMacro(operators.ExistsOne, 2, makeExistsOne), - - // The macro "range.map(var, function)", applies the function to the vars in the range. - NewReceiverMacro(operators.Map, 2, makeMap), - - // The macro "range.map(var, predicate, function)", applies the function to the vars in - // the range for which the predicate holds true. The other variables are filtered out. - NewReceiverMacro(operators.Map, 3, makeMap), - - // The macro "range.filter(var, predicate)", filters out the variables for which the - // predicate is false. - NewReceiverMacro(operators.Filter, 2, makeFilter), - } - - // NoMacros list. - NoMacros = []Macro{} -) - -// AccumulatorName is the traditional variable name assigned to the fold accumulator variable. 
-const AccumulatorName = "__result__" - -type quantifierKind int - -const ( - quantifierAll quantifierKind = iota - quantifierExists - quantifierExistsOne -) - -func makeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - return makeQuantifier(quantifierAll, eh, target, args) -} - -func makeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - return makeQuantifier(quantifierExists, eh, target, args) -} - -func makeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - return makeQuantifier(quantifierExistsOne, eh, target, args) -} - -func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - v, found := extractIdent(args[0]) - if !found { - location := eh.OffsetLocation(args[0].Id) - return nil, &common.Error{ - Message: "argument must be a simple name", - Location: location} - } - accuIdent := func() *exprpb.Expr { - return eh.Ident(AccumulatorName) - } - - var init *exprpb.Expr - var condition *exprpb.Expr - var step *exprpb.Expr - var result *exprpb.Expr - switch kind { - case quantifierAll: - init = eh.LiteralBool(true) - condition = eh.GlobalCall(operators.NotStrictlyFalse, accuIdent()) - step = eh.GlobalCall(operators.LogicalAnd, accuIdent(), args[1]) - result = accuIdent() - case quantifierExists: - init = eh.LiteralBool(false) - condition = eh.GlobalCall( - operators.NotStrictlyFalse, - eh.GlobalCall(operators.LogicalNot, accuIdent())) - step = eh.GlobalCall(operators.LogicalOr, accuIdent(), args[1]) - result = accuIdent() - case quantifierExistsOne: - zeroExpr := eh.LiteralInt(0) - oneExpr := eh.LiteralInt(1) - init = zeroExpr - condition = eh.LiteralBool(true) - step = eh.GlobalCall(operators.Conditional, args[1], - eh.GlobalCall(operators.Add, accuIdent(), oneExpr), accuIdent()) - result = eh.GlobalCall(operators.Equals, accuIdent(), oneExpr) - default: - return nil, &common.Error{Message: fmt.Sprintf("unrecognized quantifier '%v'", kind)} - } - return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil -} - -func makeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - v, found := extractIdent(args[0]) - if !found { - return nil, &common.Error{Message: "argument is not an identifier"} - } - - var fn *exprpb.Expr - var filter *exprpb.Expr - - if len(args) == 3 { - filter = args[1] - fn = args[2] - } else { - filter = nil - fn = args[1] - } - - accuExpr := eh.Ident(AccumulatorName) - init := eh.NewList() - condition := eh.LiteralBool(true) - // TODO: use compiler internal method for faster, stateful add. - step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn)) - - if filter != nil { - step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr) - } - return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil -} - -func makeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - v, found := extractIdent(args[0]) - if !found { - return nil, &common.Error{Message: "argument is not an identifier"} - } - - filter := args[1] - accuExpr := eh.Ident(AccumulatorName) - init := eh.NewList() - condition := eh.LiteralBool(true) - // TODO: use compiler internal method for faster, stateful add. 
- step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0])) - step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr) - return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil -} - -func extractIdent(e *exprpb.Expr) (string, bool) { - switch e.ExprKind.(type) { - case *exprpb.Expr_IdentExpr: - return e.GetIdentExpr().Name, true - } - return "", false -} - -func makeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok { - return eh.PresenceTest(s.SelectExpr.Operand, s.SelectExpr.Field), nil - } - return nil, &common.Error{Message: "invalid argument to has() macro"} -} diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go deleted file mode 100644 index ea473bd114..0000000000 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ /dev/null @@ -1,650 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package parser declares an expression parser with support for macro -// expansion. -package parser - -import ( - "fmt" - "strconv" - "strings" - "sync" - - "github.com/antlr/antlr4/runtime/Go/antlr" - - "github.com/google/cel-go/common" - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/parser/gen" - - structpb "google.golang.org/protobuf/types/known/structpb" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid -// field names for protos, and it would complicate the grammar to distinguish the cases. -var reservedIds = []string{ - "as", "break", "const", "continue", "else", "false", "for", "function", "if", - "import", "in", "let", "loop", "package", "namespace", "null", "return", - "true", "var", "void", "while", -} - -// Parse converts a source input a parsed expression. -// This function calls ParseWithMacros with AllMacros. -func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { - return ParseWithMacros(source, AllMacros) -} - -// ParseWithMacros converts a source input and macros set to a parsed expression. 
-func ParseWithMacros(source common.Source, macros []Macro) (*exprpb.ParsedExpr, *common.Errors) { - macroMap := make(map[string]Macro) - for _, m := range macros { - macroMap[m.MacroKey()] = m - } - p := parser{ - errors: &parseErrors{common.NewErrors(source)}, - helper: newParserHelper(source), - macros: macroMap, - } - e := p.parse(source.Content()) - return &exprpb.ParsedExpr{ - Expr: e, - SourceInfo: p.helper.getSourceInfo(), - }, p.errors.Errors -} - -type parser struct { - gen.BaseCELVisitor - errors *parseErrors - helper *parserHelper - macros map[string]Macro -} - -var ( - _ gen.CELVisitor = (*parser)(nil) - - lexerPool *sync.Pool = &sync.Pool{ - New: func() interface{} { - return gen.NewCELLexer(nil) - }, - } - - parserPool *sync.Pool = &sync.Pool{ - New: func() interface{} { - return gen.NewCELParser(nil) - }, - } -) - -func (p *parser) parse(expression string) *exprpb.Expr { - lexer := lexerPool.Get().(*gen.CELLexer) - lexer.SetInputStream(antlr.NewInputStream(expression)) - defer lexerPool.Put(lexer) - - prsr := parserPool.Get().(*gen.CELParser) - prsr.SetInputStream(antlr.NewCommonTokenStream(lexer, 0)) - defer parserPool.Put(prsr) - - lexer.RemoveErrorListeners() - prsr.RemoveErrorListeners() - lexer.AddErrorListener(p) - prsr.AddErrorListener(p) - - return p.Visit(prsr.Start()).(*exprpb.Expr) -} - -// Visitor implementations. -func (p *parser) Visit(tree antlr.ParseTree) interface{} { - switch tree.(type) { - case *gen.StartContext: - return p.VisitStart(tree.(*gen.StartContext)) - case *gen.ExprContext: - return p.VisitExpr(tree.(*gen.ExprContext)) - case *gen.ConditionalAndContext: - return p.VisitConditionalAnd(tree.(*gen.ConditionalAndContext)) - case *gen.ConditionalOrContext: - return p.VisitConditionalOr(tree.(*gen.ConditionalOrContext)) - case *gen.RelationContext: - return p.VisitRelation(tree.(*gen.RelationContext)) - case *gen.CalcContext: - return p.VisitCalc(tree.(*gen.CalcContext)) - case *gen.LogicalNotContext: - return p.VisitLogicalNot(tree.(*gen.LogicalNotContext)) - case *gen.MemberExprContext: - return p.VisitMemberExpr(tree.(*gen.MemberExprContext)) - case *gen.PrimaryExprContext: - return p.VisitPrimaryExpr(tree.(*gen.PrimaryExprContext)) - case *gen.SelectOrCallContext: - return p.VisitSelectOrCall(tree.(*gen.SelectOrCallContext)) - case *gen.MapInitializerListContext: - return p.VisitMapInitializerList(tree.(*gen.MapInitializerListContext)) - case *gen.NegateContext: - return p.VisitNegate(tree.(*gen.NegateContext)) - case *gen.IndexContext: - return p.VisitIndex(tree.(*gen.IndexContext)) - case *gen.UnaryContext: - return p.VisitUnary(tree.(*gen.UnaryContext)) - case *gen.CreateListContext: - return p.VisitCreateList(tree.(*gen.CreateListContext)) - case *gen.CreateMessageContext: - return p.VisitCreateMessage(tree.(*gen.CreateMessageContext)) - case *gen.CreateStructContext: - return p.VisitCreateStruct(tree.(*gen.CreateStructContext)) - } - - // Report at least one error if the parser reaches an unknown parse element. - // Typically, this happens if the parser has already encountered a syntax error elsewhere. - if len(p.errors.GetErrors()) == 0 { - txt := "<>" - if tree != nil { - txt = fmt.Sprintf("<<%T>>", tree) - } - return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt) - } - return p.helper.newExpr(common.NoLocation) - -} - -// Visit a parse tree produced by CELParser#start. 
-func (p *parser) VisitStart(ctx *gen.StartContext) interface{} { - return p.Visit(ctx.Expr()) -} - -// Visit a parse tree produced by CELParser#expr. -func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) - if ctx.GetOp() == nil { - return result - } - opID := p.helper.id(ctx.GetOp()) - ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr) - ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr) - return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse) -} - -// Visit a parse tree produced by CELParser#conditionalOr. -func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) - if ctx.GetOps() == nil { - return result - } - b := newBalancer(p.helper, operators.LogicalOr, result) - rest := ctx.GetE1() - for i, op := range ctx.GetOps() { - if i >= len(rest) { - return p.reportError(ctx, "unexpected character, wanted '||'") - } - next := p.Visit(rest[i]).(*exprpb.Expr) - opID := p.helper.id(op) - b.addTerm(opID, next) - } - return b.balance() -} - -// Visit a parse tree produced by CELParser#conditionalAnd. -func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{} { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) - if ctx.GetOps() == nil { - return result - } - b := newBalancer(p.helper, operators.LogicalAnd, result) - rest := ctx.GetE1() - for i, op := range ctx.GetOps() { - if i >= len(rest) { - return p.reportError(ctx, "unexpected character, wanted '&&'") - } - next := p.Visit(rest[i]).(*exprpb.Expr) - opID := p.helper.id(op) - b.addTerm(opID, next) - } - return b.balance() -} - -// Visit a parse tree produced by CELParser#relation. -func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} { - if ctx.Calc() != nil { - return p.Visit(ctx.Calc()) - } - opText := "" - if ctx.GetOp() != nil { - opText = ctx.GetOp().GetText() - } - if op, found := operators.Find(opText); found { - lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr) - opID := p.helper.id(ctx.GetOp()) - rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr) - return p.globalCallOrMacro(opID, op, lhs, rhs) - } - return p.reportError(ctx, "operator not found") -} - -// Visit a parse tree produced by CELParser#calc. -func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} { - if ctx.Unary() != nil { - return p.Visit(ctx.Unary()) - } - opText := "" - if ctx.GetOp() != nil { - opText = ctx.GetOp().GetText() - } - if op, found := operators.Find(opText); found { - lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr) - opID := p.helper.id(ctx.GetOp()) - rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr) - return p.globalCallOrMacro(opID, op, lhs, rhs) - } - return p.reportError(ctx, "operator not found") -} - -func (p *parser) VisitUnary(ctx *gen.UnaryContext) interface{} { - return p.helper.newLiteralString(ctx, "<>") -} - -// Visit a parse tree produced by CELParser#MemberExpr. -func (p *parser) VisitMemberExpr(ctx *gen.MemberExprContext) interface{} { - switch ctx.Member().(type) { - case *gen.PrimaryExprContext: - return p.VisitPrimaryExpr(ctx.Member().(*gen.PrimaryExprContext)) - case *gen.SelectOrCallContext: - return p.VisitSelectOrCall(ctx.Member().(*gen.SelectOrCallContext)) - case *gen.IndexContext: - return p.VisitIndex(ctx.Member().(*gen.IndexContext)) - case *gen.CreateMessageContext: - return p.VisitCreateMessage(ctx.Member().(*gen.CreateMessageContext)) - } - return p.reportError(ctx, "unsupported simple expression") -} - -// Visit a parse tree produced by CELParser#LogicalNot. 
-func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} { - if len(ctx.GetOps())%2 == 0 { - return p.Visit(ctx.Member()) - } - opID := p.helper.id(ctx.GetOps()[0]) - target := p.Visit(ctx.Member()).(*exprpb.Expr) - return p.globalCallOrMacro(opID, operators.LogicalNot, target) -} - -func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} { - if len(ctx.GetOps())%2 == 0 { - return p.Visit(ctx.Member()) - } - opID := p.helper.id(ctx.GetOps()[0]) - target := p.Visit(ctx.Member()).(*exprpb.Expr) - return p.globalCallOrMacro(opID, operators.Negate, target) -} - -// Visit a parse tree produced by CELParser#SelectOrCall. -func (p *parser) VisitSelectOrCall(ctx *gen.SelectOrCallContext) interface{} { - operand := p.Visit(ctx.Member()).(*exprpb.Expr) - // Handle the error case where no valid identifier is specified. - if ctx.GetId() == nil { - return p.helper.newExpr(ctx) - } - id := ctx.GetId().GetText() - if ctx.GetOpen() != nil { - opID := p.helper.id(ctx.GetOpen()) - return p.receiverCallOrMacro(opID, id, operand, p.visitList(ctx.GetArgs())...) - } - return p.helper.newSelect(ctx.GetOp(), operand, id) -} - -// Visit a parse tree produced by CELParser#PrimaryExpr. -func (p *parser) VisitPrimaryExpr(ctx *gen.PrimaryExprContext) interface{} { - switch ctx.Primary().(type) { - case *gen.NestedContext: - return p.VisitNested(ctx.Primary().(*gen.NestedContext)) - case *gen.IdentOrGlobalCallContext: - return p.VisitIdentOrGlobalCall(ctx.Primary().(*gen.IdentOrGlobalCallContext)) - case *gen.CreateListContext: - return p.VisitCreateList(ctx.Primary().(*gen.CreateListContext)) - case *gen.CreateStructContext: - return p.VisitCreateStruct(ctx.Primary().(*gen.CreateStructContext)) - case *gen.ConstantLiteralContext: - return p.VisitConstantLiteral(ctx.Primary().(*gen.ConstantLiteralContext)) - } - - return p.reportError(ctx, "invalid primary expression") -} - -// Visit a parse tree produced by CELParser#Index. -func (p *parser) VisitIndex(ctx *gen.IndexContext) interface{} { - target := p.Visit(ctx.Member()).(*exprpb.Expr) - opID := p.helper.id(ctx.GetOp()) - index := p.Visit(ctx.GetIndex()).(*exprpb.Expr) - return p.globalCallOrMacro(opID, operators.Index, target, index) -} - -// Visit a parse tree produced by CELParser#CreateMessage. -func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) interface{} { - target := p.Visit(ctx.Member()).(*exprpb.Expr) - objID := p.helper.id(ctx.GetOp()) - if messageName, found := p.extractQualifiedName(target); found { - entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry) - return p.helper.newObject(objID, messageName, entries...) - } - return p.helper.newExpr(objID) -} - -// Visit a parse tree of field initializers. -func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) interface{} { - if ctx == nil || ctx.GetFields() == nil { - // This is the result of a syntax error handled elswhere, return empty. - return []*exprpb.Expr_CreateStruct_Entry{} - } - - result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields())) - cols := ctx.GetCols() - vals := ctx.GetValues() - for i, f := range ctx.GetFields() { - if i >= len(cols) || i >= len(vals) { - // This is the result of a syntax error detected elsewhere. 
- return []*exprpb.Expr_CreateStruct_Entry{} - } - initID := p.helper.id(cols[i]) - value := p.Visit(vals[i]).(*exprpb.Expr) - field := p.helper.newObjectField(initID, f.GetText(), value) - result[i] = field - } - return result -} - -// Visit a parse tree produced by CELParser#IdentOrGlobalCall. -func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) interface{} { - identName := "" - if ctx.GetLeadingDot() != nil { - identName = "." - } - // Handle the error case where no valid identifier is specified. - if ctx.GetId() == nil { - return p.helper.newExpr(ctx) - } - // Handle reserved identifiers. - id := ctx.GetId().GetText() - for _, r := range reservedIds { - if r == id { - return p.reportError(ctx, "reserved identifier: %s", r) - } - } - identName += id - if ctx.GetOp() != nil { - opID := p.helper.id(ctx.GetOp()) - return p.globalCallOrMacro(opID, identName, p.visitList(ctx.GetArgs())...) - } - return p.helper.newIdent(ctx.GetId(), identName) -} - -// Visit a parse tree produced by CELParser#Nested. -func (p *parser) VisitNested(ctx *gen.NestedContext) interface{} { - return p.Visit(ctx.GetE()) -} - -// Visit a parse tree produced by CELParser#CreateList. -func (p *parser) VisitCreateList(ctx *gen.CreateListContext) interface{} { - listID := p.helper.id(ctx.GetOp()) - return p.helper.newList(listID, p.visitList(ctx.GetElems())...) -} - -// Visit a parse tree produced by CELParser#CreateStruct. -func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} { - structID := p.helper.id(ctx.GetOp()) - entries := []*exprpb.Expr_CreateStruct_Entry{} - if ctx.GetEntries() != nil { - entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry) - } - return p.helper.newMap(structID, entries...) -} - -// Visit a parse tree produced by CELParser#ConstantLiteral. -func (p *parser) VisitConstantLiteral(ctx *gen.ConstantLiteralContext) interface{} { - switch ctx.Literal().(type) { - case *gen.IntContext: - return p.VisitInt(ctx.Literal().(*gen.IntContext)) - case *gen.UintContext: - return p.VisitUint(ctx.Literal().(*gen.UintContext)) - case *gen.DoubleContext: - return p.VisitDouble(ctx.Literal().(*gen.DoubleContext)) - case *gen.StringContext: - return p.VisitString(ctx.Literal().(*gen.StringContext)) - case *gen.BytesContext: - return p.VisitBytes(ctx.Literal().(*gen.BytesContext)) - case *gen.BoolFalseContext: - return p.VisitBoolFalse(ctx.Literal().(*gen.BoolFalseContext)) - case *gen.BoolTrueContext: - return p.VisitBoolTrue(ctx.Literal().(*gen.BoolTrueContext)) - case *gen.NullContext: - return p.VisitNull(ctx.Literal().(*gen.NullContext)) - } - return p.reportError(ctx, "invalid literal") -} - -// Visit a parse tree produced by CELParser#mapInitializerList. -func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) interface{} { - if ctx == nil || ctx.GetKeys() == nil { - // This is the result of a syntax error handled elswhere, return empty. - return []*exprpb.Expr_CreateStruct_Entry{} - } - - result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols())) - keys := ctx.GetKeys() - vals := ctx.GetValues() - for i, col := range ctx.GetCols() { - colID := p.helper.id(col) - if i >= len(keys) || i >= len(vals) { - // This is the result of a syntax error detected elsewhere. 
- return []*exprpb.Expr_CreateStruct_Entry{} - } - key := p.Visit(keys[i]).(*exprpb.Expr) - value := p.Visit(vals[i]).(*exprpb.Expr) - entry := p.helper.newMapEntry(colID, key, value) - result[i] = entry - } - return result -} - -// Visit a parse tree produced by CELParser#Int. -func (p *parser) VisitInt(ctx *gen.IntContext) interface{} { - text := ctx.GetTok().GetText() - base := 10 - if strings.HasPrefix(text, "0x") { - base = 16 - text = text[2:] - } - if ctx.GetSign() != nil { - text = ctx.GetSign().GetText() + text - } - i, err := strconv.ParseInt(text, base, 64) - if err != nil { - return p.reportError(ctx, "invalid int literal") - } - return p.helper.newLiteralInt(ctx, i) -} - -// Visit a parse tree produced by CELParser#Uint. -func (p *parser) VisitUint(ctx *gen.UintContext) interface{} { - text := ctx.GetTok().GetText() - // trim the 'u' designator included in the uint literal. - text = text[:len(text)-1] - base := 10 - if strings.HasPrefix(text, "0x") { - base = 16 - text = text[2:] - } - i, err := strconv.ParseUint(text, base, 64) - if err != nil { - return p.reportError(ctx, "invalid uint literal") - } - return p.helper.newLiteralUint(ctx, i) -} - -// Visit a parse tree produced by CELParser#Double. -func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} { - txt := ctx.GetTok().GetText() - if ctx.GetSign() != nil { - txt = ctx.GetSign().GetText() + txt - } - f, err := strconv.ParseFloat(txt, 64) - if err != nil { - return p.reportError(ctx, "invalid double literal") - } - return p.helper.newLiteralDouble(ctx, f) - -} - -// Visit a parse tree produced by CELParser#String. -func (p *parser) VisitString(ctx *gen.StringContext) interface{} { - s := p.unquote(ctx, ctx.GetText(), false) - return p.helper.newLiteralString(ctx, s) -} - -// Visit a parse tree produced by CELParser#Bytes. -func (p *parser) VisitBytes(ctx *gen.BytesContext) interface{} { - b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true)) - return p.helper.newLiteralBytes(ctx, b) -} - -// Visit a parse tree produced by CELParser#BoolTrue. -func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) interface{} { - return p.helper.newLiteralBool(ctx, true) -} - -// Visit a parse tree produced by CELParser#BoolFalse. -func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) interface{} { - return p.helper.newLiteralBool(ctx, false) -} - -// Visit a parse tree produced by CELParser#Null. -func (p *parser) VisitNull(ctx *gen.NullContext) interface{} { - return p.helper.newLiteral(ctx, - &exprpb.Constant{ - ConstantKind: &exprpb.Constant_NullValue{ - NullValue: structpb.NullValue_NULL_VALUE}}) -} - -func (p *parser) visitList(ctx gen.IExprListContext) []*exprpb.Expr { - if ctx == nil { - return []*exprpb.Expr{} - } - return p.visitSlice(ctx.GetE()) -} - -func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr { - if expressions == nil { - return []*exprpb.Expr{} - } - result := make([]*exprpb.Expr, len(expressions)) - for i, e := range expressions { - ex := p.Visit(e).(*exprpb.Expr) - result[i] = ex - } - return result -} - -func (p *parser) extractQualifiedName(e *exprpb.Expr) (string, bool) { - if e == nil { - return "", false - } - switch e.ExprKind.(type) { - case *exprpb.Expr_IdentExpr: - return e.GetIdentExpr().Name, true - case *exprpb.Expr_SelectExpr: - s := e.GetSelectExpr() - if prefix, found := p.extractQualifiedName(s.Operand); found { - return prefix + "." + s.Field, true - } - } - // TODO: Add a method to Source to get location from character offset. 
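VisitInt and VisitUint above strip an optional 0x prefix (switching to base 16) and, for uints, the trailing 'u' designator before calling strconv. A standalone sketch of that handling, using the same strconv calls but none of cel-go's types:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCELInt mirrors the VisitInt logic: optional leading sign, optional 0x
// prefix selecting base 16, otherwise base 10, parsed as a signed 64-bit int.
func parseCELInt(text string) (int64, error) {
	sign := ""
	if strings.HasPrefix(text, "-") {
		sign, text = "-", text[1:]
	}
	base := 10
	if strings.HasPrefix(text, "0x") {
		base, text = 16, text[2:]
	}
	return strconv.ParseInt(sign+text, base, 64)
}

// parseCELUint mirrors VisitUint: trim the trailing 'u' designator first.
func parseCELUint(text string) (uint64, error) {
	text = strings.TrimSuffix(text, "u")
	base := 10
	if strings.HasPrefix(text, "0x") {
		base, text = 16, text[2:]
	}
	return strconv.ParseUint(text, base, 64)
}

func main() {
	fmt.Println(parseCELInt("-0x2a")) // -42 <nil>
	fmt.Println(parseCELUint("42u"))  // 42 <nil>
}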
- location := p.helper.getLocation(e.Id) - p.reportError(location, "expected a qualified name") - return "", false -} - -func (p *parser) unquote(ctx interface{}, value string, isBytes bool) string { - text, err := unescape(value, isBytes) - if err != nil { - p.reportError(ctx, "%s", err.Error()) - return value - } - return text -} - -func (p *parser) reportError(ctx interface{}, format string, args ...interface{}) *exprpb.Expr { - var location common.Location - switch ctx.(type) { - case common.Location: - location = ctx.(common.Location) - case antlr.Token, antlr.ParserRuleContext: - err := p.helper.newExpr(ctx) - location = p.helper.getLocation(err.Id) - } - err := p.helper.newExpr(ctx) - // Provide arguments to the report error. - p.errors.ReportError(location, format, args...) - return err -} - -// ANTLR Parse listener implementations -func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) { - // TODO: Snippet - l := p.helper.source.NewLocation(line, column) - p.errors.syntaxError(l, msg) -} - -func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) { - // Intentional -} - -func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) { - // Intentional -} - -func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) { - // Intentional -} - -func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr { - if expr, found := p.expandMacro(exprID, function, nil, args...); found { - return expr - } - return p.helper.newGlobalCall(exprID, function, args...) -} - -func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { - if expr, found := p.expandMacro(exprID, function, target, args...); found { - return expr - } - return p.helper.newReceiverCall(exprID, function, target, args...) -} - -func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) { - macro, found := p.macros[makeMacroKey(function, len(args), target != nil)] - if !found { - macro, found = p.macros[makeVarArgMacroKey(function, target != nil)] - if !found { - return nil, false - } - } - eh := exprHelperPool.Get().(*exprHelper) - defer exprHelperPool.Put(eh) - eh.parserHelper = p.helper - eh.id = exprID - expr, err := macro.Expander()(eh, target, args) - if err != nil { - if err.Location != nil { - return p.reportError(err.Location, err.Message), true - } - return p.reportError(p.helper.getLocation(exprID), err.Message), true - } - return expr, true -} diff --git a/vendor/github.com/google/cel-go/parser/unescape.go b/vendor/github.com/google/cel-go/parser/unescape.go deleted file mode 100644 index 27c57a9f3a..0000000000 --- a/vendor/github.com/google/cel-go/parser/unescape.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -// Unescape takes a quoted string, unquotes, and unescapes it. -// -// This function performs escaping compatible with GoogleSQL. -func unescape(value string, isBytes bool) (string, error) { - // All strings normalize newlines to the \n representation. - value = newlineNormalizer.Replace(value) - n := len(value) - - // Nothing to unescape / decode. - if n < 2 { - return value, fmt.Errorf("unable to unescape string") - } - - // Raw string preceded by the 'r|R' prefix. - isRawLiteral := false - if value[0] == 'r' || value[0] == 'R' { - value = value[1:] - n = len(value) - isRawLiteral = true - } - - // Quoted string of some form, must have same first and last char. - if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') { - return value, fmt.Errorf("unable to unescape string") - } - - // Normalize the multi-line CEL string representation to a standard - // Go quoted string. - if n >= 6 { - if strings.HasPrefix(value, "'''") { - if !strings.HasSuffix(value, "'''") { - return value, fmt.Errorf("unable to unescape string") - } - value = "\"" + value[3:n-3] + "\"" - } else if strings.HasPrefix(value, `"""`) { - if !strings.HasSuffix(value, `"""`) { - return value, fmt.Errorf("unable to unescape string") - } - value = "\"" + value[3:n-3] + "\"" - } - n = len(value) - } - value = value[1 : n-1] - // If there is nothing to escape, then return. - if isRawLiteral || !strings.ContainsRune(value, '\\') { - return value, nil - } - - // Otherwise the string contains escape characters. - // The following logic is adapted from `strconv/quote.go` - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*n/2) - for len(value) > 0 { - c, encode, rest, err := unescapeChar(value, isBytes) - if err != nil { - return "", err - } - value = rest - if c < utf8.RuneSelf || !encode { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - } - return string(buf), nil -} - -// unescapeChar takes a string input and returns the following info: -// -// value - the escaped unicode rune at the front of the string. -// encode - the value should be unicode-encoded -// tail - the remainder of the input string. -// err - error value, if the character could not be unescaped. -// -// When encode is true the return value may still fit within a single byte, -// but unicode encoding is attempted which is more expensive than when the -// value is known to self-represent as a single byte. -// -// If isBytes is set, unescape as a bytes literal so octal and hex escapes -// represent byte values, not unicode code points. -func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, err error) { - // 1. Character is not an escape sequence. - switch c := s[0]; { - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // 2. Last character is the start of an escape sequence. 
- if len(s) <= 1 { - err = fmt.Errorf("unable to unescape string, found '\\' as last character") - return - } - - c := s[1] - s = s[2:] - // 3. Common escape sequences shared with Google SQL - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case '\\': - value = '\\' - case '\'': - value = '\'' - case '"': - value = '"' - case '`': - value = '`' - case '?': - value = '?' - - // 4. Unicode escape sequences, reproduced from `strconv/quote.go` - case 'x', 'X', 'u', 'U': - n := 0 - encode = true - switch c { - case 'x', 'X': - n = 2 - encode = !isBytes - case 'u': - n = 4 - if isBytes { - err = fmt.Errorf("unable to unescape string") - return - } - case 'U': - n = 8 - if isBytes { - err = fmt.Errorf("unable to unescape string") - return - } - } - var v rune - if len(s) < n { - err = fmt.Errorf("unable to unescape string") - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = fmt.Errorf("unable to unescape string") - return - } - v = v<<4 | x - } - s = s[n:] - if !isBytes && v > utf8.MaxRune { - err = fmt.Errorf("unable to unescape string") - return - } - value = v - - // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7] - case '0', '1', '2', '3': - if len(s) < 2 { - err = fmt.Errorf("unable to unescape octal sequence in string") - return - } - v := rune(c - '0') - for j := 0; j < 2; j++ { - x := s[j] - if x < '0' || x > '7' { - err = fmt.Errorf("unable to unescape octal sequence in string") - return - } - v = v*8 + rune(x-'0') - } - if !isBytes && v > utf8.MaxRune { - err = fmt.Errorf("unable to unescape string") - return - } - value = v - s = s[2:] - encode = !isBytes - - // Unknown escape sequence. - default: - err = fmt.Errorf("unable to unescape string") - } - - tail = s - return -} - -func unhex(b byte) (rune, bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return 0, false -} - -var ( - newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n") -) diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go deleted file mode 100644 index 654f4b9570..0000000000 --- a/vendor/github.com/google/cel-go/parser/unparser.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - "strconv" - "strings" - - "github.com/google/cel-go/common/operators" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// Unparse takes an input expression and source position information and generates a human-readable -// expression. 
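The unescape and unescapeChar helpers above accept the full range of CEL string forms: single or double quotes, triple-quoted multi-line literals, a leading r or R for raw strings, and backslash escapes including octal, \x, \u and \U. The sketch below assumes it is compiled alongside the unescape function in this package (as it existed before this deletion) and simply exercises a few of those forms; the literals are illustrative only.

package parser

import "fmt"

// demoUnescape shows how a few CEL string literal forms decode under the
// rules implemented above. It assumes the unescape helper is still present
// in this package.
func demoUnescape() {
	for _, lit := range []string{
		`"hello\nworld"`, // common backslash escape
		`r"raw\nstring"`, // r prefix: backslashes are kept verbatim
		`'''multi
line'''`, // triple quotes allow embedded newlines
		`"\x41\u00e9"`, // hex and unicode escapes
	} {
		s, err := unescape(lit, false)
		fmt.Printf("%q -> %q (err=%v)\n", lit, s, err)
	}
}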
-// -// Note, unparsing an AST will often generate the same expression as was originally parsed, but some -// formatting may be lost in translation, notably: -// -// - All quoted literals are doubled quoted. -// - Byte literals are represented as octal escapes (same as Google SQL). -// - Floating point values are converted to the small number of digits needed to represent the value. -// - Spacing around punctuation marks may be lost. -// - Parentheses will only be applied when they affect operator precedence. -func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo) (string, error) { - un := &unparser{info: info} - err := un.visit(expr) - if err != nil { - return "", err - } - return un.str.String(), nil -} - -// unparser visits an expression to reconstruct a human-readable string from an AST. -type unparser struct { - str strings.Builder - offset int32 - // TODO: use the source info to rescontruct macros into function calls. - info *exprpb.SourceInfo -} - -func (un *unparser) visit(expr *exprpb.Expr) error { - switch expr.ExprKind.(type) { - case *exprpb.Expr_CallExpr: - return un.visitCall(expr) - // TODO: Comprehensions are currently not supported. - case *exprpb.Expr_ComprehensionExpr: - return un.visitComprehension(expr) - case *exprpb.Expr_ConstExpr: - return un.visitConst(expr) - case *exprpb.Expr_IdentExpr: - return un.visitIdent(expr) - case *exprpb.Expr_ListExpr: - return un.visitList(expr) - case *exprpb.Expr_SelectExpr: - return un.visitSelect(expr) - case *exprpb.Expr_StructExpr: - return un.visitStruct(expr) - } - return fmt.Errorf("unsupported expr: %v", expr) -} - -func (un *unparser) visitCall(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - switch fun { - // ternary operator - case operators.Conditional: - return un.visitCallConditional(expr) - // index operator - case operators.Index: - return un.visitCallIndex(expr) - // unary operators - case operators.LogicalNot, operators.Negate: - return un.visitCallUnary(expr) - // binary operators - case operators.Add, - operators.Divide, - operators.Equals, - operators.Greater, - operators.GreaterEquals, - operators.In, - operators.Less, - operators.LessEquals, - operators.LogicalAnd, - operators.LogicalOr, - operators.Modulo, - operators.Multiply, - operators.NotEquals, - operators.OldIn, - operators.Subtract: - return un.visitCallBinary(expr) - // standard function calls. - default: - return un.visitCallFunc(expr) - } -} - -func (un *unparser) visitCallBinary(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() - lhs := args[0] - // add parens if the current operator is lower precedence than the lhs expr operator. - lhsParen := isComplexOperatorWithRespectTo(fun, lhs) - rhs := args[1] - // add parens if the current operator is lower precedence than the rhs expr operator, - // or the same precedence and the operator is left recursive. 
- rhsParen := isComplexOperatorWithRespectTo(fun, rhs) - if !rhsParen && isLeftRecursive(fun) { - rhsParen = isSamePrecedence(fun, rhs) - } - err := un.visitMaybeNested(lhs, lhsParen) - if err != nil { - return err - } - unmangled, found := operators.FindReverseBinaryOperator(fun) - if !found { - return fmt.Errorf("cannot unmangle operator: %s", fun) - } - un.str.WriteString(" ") - un.str.WriteString(unmangled) - un.str.WriteString(" ") - return un.visitMaybeNested(rhs, rhsParen) -} - -func (un *unparser) visitCallConditional(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - args := c.GetArgs() - // add parens if operand is a conditional itself. - nested := isSamePrecedence(operators.Conditional, args[0]) || - isComplexOperator(args[0]) - err := un.visitMaybeNested(args[0], nested) - if err != nil { - return err - } - un.str.WriteString(" ? ") - // add parens if operand is a conditional itself. - nested = isSamePrecedence(operators.Conditional, args[1]) || - isComplexOperator(args[1]) - err = un.visitMaybeNested(args[1], nested) - if err != nil { - return err - } - un.str.WriteString(" : ") - // add parens if operand is a conditional itself. - nested = isSamePrecedence(operators.Conditional, args[2]) || - isComplexOperator(args[2]) - - return un.visitMaybeNested(args[2], nested) -} - -func (un *unparser) visitCallFunc(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() - if c.GetTarget() != nil { - nested := isBinaryOrTernaryOperator(c.GetTarget()) - err := un.visitMaybeNested(c.GetTarget(), nested) - if err != nil { - return err - } - un.str.WriteString(".") - } - un.str.WriteString(fun) - un.str.WriteString("(") - for i, arg := range args { - err := un.visit(arg) - if err != nil { - return err - } - if i < len(args)-1 { - un.str.WriteString(", ") - } - } - un.str.WriteString(")") - return nil -} - -func (un *unparser) visitCallIndex(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - args := c.GetArgs() - nested := isBinaryOrTernaryOperator(args[0]) - err := un.visitMaybeNested(args[0], nested) - if err != nil { - return err - } - un.str.WriteString("[") - err = un.visit(args[1]) - if err != nil { - return err - } - un.str.WriteString("]") - return nil -} - -func (un *unparser) visitCallUnary(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() - unmangled, found := operators.FindReverse(fun) - if !found { - return fmt.Errorf("cannot unmangle operator: %s", fun) - } - un.str.WriteString(unmangled) - nested := isComplexOperator(args[0]) - return un.visitMaybeNested(args[0], nested) -} - -func (un *unparser) visitComprehension(expr *exprpb.Expr) error { - // TODO: introduce a macro expansion map between the top-level comprehension id and the - // function call that the macro replaces. 
- return fmt.Errorf("unimplemented : %v", expr) -} - -func (un *unparser) visitConst(expr *exprpb.Expr) error { - c := expr.GetConstExpr() - switch c.ConstantKind.(type) { - case *exprpb.Constant_BoolValue: - un.str.WriteString(strconv.FormatBool(c.GetBoolValue())) - case *exprpb.Constant_BytesValue: - // bytes constants are surrounded with b"" - b := c.GetBytesValue() - un.str.WriteString(`b"`) - un.str.WriteString(bytesToOctets(b)) - un.str.WriteString(`"`) - case *exprpb.Constant_DoubleValue: - // represent the float using the minimum required digits - d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64) - un.str.WriteString(d) - case *exprpb.Constant_Int64Value: - i := strconv.FormatInt(c.GetInt64Value(), 10) - un.str.WriteString(i) - case *exprpb.Constant_NullValue: - un.str.WriteString("null") - case *exprpb.Constant_StringValue: - // strings will be double quoted with quotes escaped. - un.str.WriteString(strconv.Quote(c.GetStringValue())) - case *exprpb.Constant_Uint64Value: - // uint literals have a 'u' suffix. - ui := strconv.FormatUint(c.GetUint64Value(), 10) - un.str.WriteString(ui) - un.str.WriteString("u") - default: - return fmt.Errorf("unimplemented : %v", expr) - } - return nil -} - -func (un *unparser) visitIdent(expr *exprpb.Expr) error { - un.str.WriteString(expr.GetIdentExpr().GetName()) - return nil -} - -func (un *unparser) visitList(expr *exprpb.Expr) error { - l := expr.GetListExpr() - elems := l.GetElements() - un.str.WriteString("[") - for i, elem := range elems { - err := un.visit(elem) - if err != nil { - return err - } - if i < len(elems)-1 { - un.str.WriteString(", ") - } - } - un.str.WriteString("]") - return nil -} - -func (un *unparser) visitSelect(expr *exprpb.Expr) error { - sel := expr.GetSelectExpr() - // handle the case when the select expression was generated by the has() macro. - if sel.GetTestOnly() { - un.str.WriteString("has(") - } - nested := !sel.GetTestOnly() && isBinaryOrTernaryOperator(sel.GetOperand()) - err := un.visitMaybeNested(sel.GetOperand(), nested) - if err != nil { - return err - } - un.str.WriteString(".") - un.str.WriteString(sel.GetField()) - if sel.GetTestOnly() { - un.str.WriteString(")") - } - return nil -} - -func (un *unparser) visitStruct(expr *exprpb.Expr) error { - s := expr.GetStructExpr() - // If the message name is non-empty, then this should be treated as message construction. - if s.GetMessageName() != "" { - return un.visitStructMsg(expr) - } - // Otherwise, build a map. 
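visitConst above renders each constant kind in a specific textual form: doubles with the minimal number of digits ('g' format, precision -1), strings re-quoted via strconv.Quote, uints with a trailing 'u', and bytes as b"..." built from three-digit octal escapes (see bytesToOctets further down). A compact sketch of those same formatting calls, outside of any cel-go types:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// octal renders each byte as a three-digit octal escape, as bytesToOctets does.
func octal(b []byte) string {
	var sb strings.Builder
	for _, c := range b {
		fmt.Fprintf(&sb, "\\%03o", c)
	}
	return sb.String()
}

func main() {
	fmt.Println(strconv.FormatFloat(2.5, 'g', -1, 64)) // 2.5 (minimal digits)
	fmt.Println(strconv.Quote(`say "hi"`))             // "say \"hi\""
	fmt.Println(strconv.FormatUint(42, 10) + "u")      // 42u
	fmt.Println(`b"` + octal([]byte("hi")) + `"`)      // b"\150\151"
}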
- return un.visitStructMap(expr) -} - -func (un *unparser) visitStructMsg(expr *exprpb.Expr) error { - m := expr.GetStructExpr() - entries := m.GetEntries() - un.str.WriteString(m.GetMessageName()) - un.str.WriteString("{") - for i, entry := range entries { - f := entry.GetFieldKey() - un.str.WriteString(f) - un.str.WriteString(": ") - v := entry.GetValue() - err := un.visit(v) - if err != nil { - return err - } - if i < len(entries)-1 { - un.str.WriteString(", ") - } - } - un.str.WriteString("}") - return nil -} - -func (un *unparser) visitStructMap(expr *exprpb.Expr) error { - m := expr.GetStructExpr() - entries := m.GetEntries() - un.str.WriteString("{") - for i, entry := range entries { - k := entry.GetMapKey() - err := un.visit(k) - if err != nil { - return err - } - un.str.WriteString(": ") - v := entry.GetValue() - err = un.visit(v) - if err != nil { - return err - } - if i < len(entries)-1 { - un.str.WriteString(", ") - } - } - un.str.WriteString("}") - return nil -} - -func (un *unparser) visitMaybeNested(expr *exprpb.Expr, nested bool) error { - if nested { - un.str.WriteString("(") - } - err := un.visit(expr) - if err != nil { - return err - } - if nested { - un.str.WriteString(")") - } - return nil -} - -// isLeftRecursive indicates whether the parser resolves the call in a left-recursive manner as -// this can have an effect of how parentheses affect the order of operations in the AST. -func isLeftRecursive(op string) bool { - return op != operators.LogicalAnd && op != operators.LogicalOr -} - -// isSamePrecedence indicates whether the precedence of the input operator is the same as the -// precedence of the (possible) operation represented in the input Expr. -// -// If the expr is not a Call, the result is false. -func isSamePrecedence(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil { - return false - } - c := expr.GetCallExpr() - other := c.GetFunction() - return operators.Precedence(op) == operators.Precedence(other) -} - -// isLowerPrecedence indicates whether the precedence of the input operator is lower precedence -// than the (possible) operation represented in the input Expr. -// -// If the expr is not a Call, the result is false. -func isLowerPrecedence(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil { - return false - } - c := expr.GetCallExpr() - other := c.GetFunction() - return operators.Precedence(op) < operators.Precedence(other) -} - -// Indicates whether the expr is a complex operator, i.e., a call expression -// with 2 or more arguments. -func isComplexOperator(expr *exprpb.Expr) bool { - if expr.GetCallExpr() != nil && len(expr.GetCallExpr().GetArgs()) >= 2 { - return true - } - return false -} - -// Indicates whether it is a complex operation compared to another. -// expr is *not* considered complex if it is not a call expression or has -// less than two arguments, or if it has a higher precedence than op. -func isComplexOperatorWithRespectTo(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 { - return false - } - return isLowerPrecedence(op, expr) -} - -// Indicate whether this is a binary or ternary operator. 
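The precedence helpers above decide when visitCallBinary has to parenthesize an operand so that the unparsed text re-parses to the same tree; for example a - (b - c) keeps its parentheses because '-' is left-recursive. The sketch below captures only the right-operand rule with a toy precedence table (higher binds tighter); cel-go's operators package uses its own numbering and also handles the left operand:

package main

import "fmt"

// toy precedence table; higher values bind tighter.
var prec = map[string]int{"||": 1, "&&": 2, "+": 4, "-": 4, "*": 5}

// rhsNeedsParens reports whether the right operand of op, itself a call to
// rhsOp, must be parenthesized: either rhsOp binds looser than op, or the two
// have equal precedence and op is left-recursive (everything except && and ||).
func rhsNeedsParens(op, rhsOp string) bool {
	leftRecursive := op != "&&" && op != "||"
	if prec[rhsOp] < prec[op] {
		return true
	}
	return leftRecursive && prec[rhsOp] == prec[op]
}

func main() {
	fmt.Println(rhsNeedsParens("-", "-"))   // true:  a - (b - c) keeps its parentheses
	fmt.Println(rhsNeedsParens("*", "-"))   // true:  a * (b - c)
	fmt.Println(rhsNeedsParens("-", "*"))   // false: a - b * c needs none
	fmt.Println(rhsNeedsParens("&&", "&&")) // false: '&&' chains need no parens
}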
-func isBinaryOrTernaryOperator(expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 { - return false - } - _, isBinaryOp := operators.FindReverseBinaryOperator(expr.GetCallExpr().GetFunction()) - return isBinaryOp || isSamePrecedence(operators.Conditional, expr) -} - -// bytesToOctets converts byte sequences to a string using a three digit octal encoded value -// per byte. -func bytesToOctets(byteVal []byte) string { - var b strings.Builder - for _, c := range byteVal { - fmt.Fprintf(&b, "\\%03o", c) - } - return b.String() -} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 3d45c1a47f..f01eff318c 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans } // pops the address from the stack. Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection -// uses a seperate stack for the x and y values. +// uses a separate stack for the x and y values. // // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 168f92f3c1..2ad3bc85ba 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -7,6 +7,7 @@ package cmp import ( "bytes" "fmt" + "math" "reflect" "strconv" "strings" @@ -96,15 +97,16 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Auto-detect the type of the data. - var isLinedText, isText, isBinary bool var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() - isText = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isBinary = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. 
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() @@ -112,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { vy2.Set(vy) vx, vy = vx2, vy2 } - if isText || isBinary { - var numLines, lastLineIdx, maxLineLen int - isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { - if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { - isBinary = true - break + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { @@ -128,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { numLines++ } } - isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + isPureLinedText = efficiencyLines < 4*efficiencyBytes + } } // Format the string into printable records. @@ -138,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. - case isLinedText: - ssx := strings.Split(sx, "\n") - ssy := strings.Split(sy, "\n") + case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { @@ -229,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. - case isText: + case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { @@ -237,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s)} }, ) - delim = "" // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. @@ -299,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // Wrap the output with appropriate type information. var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isText { + if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). 
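The new detection logic above replaces the old boolean estimates with rune counting: a value is treated as mostly text when more than 90% of its runes are printable or whitespace, and only pure multi-line text with at least four lines, none longer than 1024 bytes, is diffed line by line (and even then only if the line-level edit script is not drastically less efficient than the byte-level one). A standalone sketch of just the 90% rule, using the same threshold that appears in the hunk:

package main

import (
	"fmt"
	"math"
	"unicode"
	"unicode/utf8"
)

// isMostlyText mirrors the rune-counting heuristic: count how many runes are
// printable or whitespace (and not the replacement rune), and require that
// count to exceed 90% of all runes.
func isMostlyText(s string) bool {
	var total, valid int
	for _, r := range s {
		total++
		if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
			valid++
		}
	}
	return float64(valid) > math.Floor(0.90*float64(total))
}

func main() {
	fmt.Println(isMostlyText("hello\nworld"))           // true
	fmt.Println(isMostlyText("\x00\x01\x02\x03binary")) // false
}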
if t.Kind() == reflect.String { @@ -338,8 +354,11 @@ func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { - es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { - return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { @@ -364,6 +383,7 @@ func (opts formatOptions) formatDiffSlice( groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { @@ -416,25 +436,36 @@ func (opts formatOptions) formatDiffSlice( // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. +// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { groups = append(groups, diffStats{Name: name}) - prevCase = i + prevMode = mode } return &groups[len(groups)-1] } for _, e := range es { switch e { case diff.Identity: - lastStats(1).NumIdentical++ + lastStats('=').NumIdentical++ case diff.UniqueX: - lastStats(2).NumRemoved++ + lastStats('!').NumRemoved++ case diff.UniqueY: - lastStats(2).NumInserted++ + lastStats('!').NumInserted++ case diff.Modified: - lastStats(2).NumModified++ + lastStats('!').NumModified++ } } return groups @@ -444,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // equal groups into adjacent unequal groups that currently result in a // dual inserted/removed printout. This acts as a high-pass filter to smooth // out high-frequency changes within the windowSize. 
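coalesceAdjacentEdits performs a run-length grouping of the edit script: consecutive Identity edits accumulate into one equal group, and any run of UniqueX, UniqueY or Modified edits accumulates into one unequal group, exactly as the "..XXY...Y" example in the new doc comment shows. A minimal sketch of that grouping over a string-encoded script; the group type is a simplified stand-in for diffStats, not go-cmp's type:

package main

import "fmt"

type group struct{ identical, removed, inserted int }

// coalesce groups a script of '=', 'X' (removed) and 'Y' (inserted) ops into
// runs of equal vs. unequal edits, like coalesceAdjacentEdits does for
// diff.EditScript.
func coalesce(script string) []group {
	var groups []group
	prevMode := byte(0)
	last := func(mode byte) *group {
		if prevMode != mode {
			groups = append(groups, group{})
			prevMode = mode
		}
		return &groups[len(groups)-1]
	}
	for i := 0; i < len(script); i++ {
		switch script[i] {
		case '=':
			last('=').identical++
		case 'X':
			last('!').removed++
		case 'Y':
			last('!').inserted++
		}
	}
	return groups
}

func main() {
	fmt.Println(coalesce("==XXY===Y"))
	// [{2 0 0} {0 2 1} {3 0 0} {0 0 1}]
}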
+// +// Example: +// +// WindowSize: 16, +// Input: [ +// {NumIdentical: 61}, // group 0 +// {NumRemoved: 3, NumInserted: 1}, // group 1 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 9}, // └── coalesce +// {NumIdentical: 64}, // group 2 +// {NumRemoved: 3, NumInserted: 1}, // group 3 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 7}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 2}, // └── coalesce +// {NumIdentical: 63}, // group 4 +// ] +// Output: [ +// {NumIdentical: 61}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 64}, +// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 63}, +// ] +// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -463,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat } return groups } + +// cleanupSurroundingIdentical scans through all unequal groups, and +// moves any leading sequence of equal elements to the preceding equal group and +// moves and trailing sequence of equal elements to the succeeding equal group. +// +// This is necessary since coalesceInterveningIdentical may coalesce edit groups +// together such that leading/trailing spans of equal elements becomes possible. +// Note that this can occur even with an optimal diffing algorithm. +// +// Example: +// +// Input: [ +// {NumIdentical: 61}, +// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements +// {NumIdentical: 67}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements +// {NumIdentical: 54}, +// ] +// Output: [ +// {NumIdentical: 64}, // incremented by 3 +// {NumRemoved: 9}, +// {NumIdentical: 67}, +// {NumRemoved: 9}, +// {NumIdentical: 64}, // incremented by 10 +// ] +// +func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { + var ix, iy int // indexes into sequence x and y + for i, ds := range groups { + // Handle equal group. + if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + numLeadingIdentical++ + } + for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. + if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. 
+ if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE deleted file mode 100644 index 6d16b6578a..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2016, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go deleted file mode 100644 index b1d53dd19c..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "math/rand" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// CallOption is an option used by Invoke to control behaviors of RPC calls. -// CallOption works by modifying relevant fields of CallSettings. -type CallOption interface { - // Resolve applies the option by modifying cs. - Resolve(cs *CallSettings) -} - -// Retryer is used by Invoke to determine retry behavior. -type Retryer interface { - // Retry reports whether a request should be retriedand how long to pause before retrying - // if the previous attempt returned with err. Invoke never calls Retry with nil error. - Retry(err error) (pause time.Duration, shouldRetry bool) -} - -type retryerOption func() Retryer - -func (o retryerOption) Resolve(s *CallSettings) { - s.Retry = o -} - -// WithRetry sets CallSettings.Retry to fn. -func WithRetry(fn func() Retryer) CallOption { - return retryerOption(fn) -} - -// OnCodes returns a Retryer that retries if and only if -// the previous attempt returns a GRPC error whose error code is stored in cc. -// Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. -func OnCodes(cc []codes.Code, bo Backoff) Retryer { - return &boRetryer{ - backoff: bo, - codes: append([]codes.Code(nil), cc...), - } -} - -type boRetryer struct { - backoff Backoff - codes []codes.Code -} - -func (r *boRetryer) Retry(err error) (time.Duration, bool) { - st, ok := status.FromError(err) - if !ok { - return 0, false - } - c := st.Code() - for _, rc := range r.codes { - if c == rc { - return r.backoff.Pause(), true - } - } - return 0, false -} - -// Backoff implements exponential backoff. -// The wait time between retries is a random value between 0 and the "retry envelope". -// The envelope starts at Initial and increases by the factor of Multiplier every retry, -// but is capped at Max. -type Backoff struct { - // Initial is the initial value of the retry envelope, defaults to 1 second. - Initial time.Duration - - // Max is the maximum value of the retry envelope, defaults to 30 seconds. - Max time.Duration - - // Multiplier is the factor by which the retry envelope increases. - // It should be greater than 1 and defaults to 2. - Multiplier float64 - - // cur is the current retry envelope - cur time.Duration -} - -// Pause returns the next time.Duration that the caller should use to backoff. 
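Before the Pause implementation that follows, here is how these pieces compose in practice: OnCodes plus a Backoff yields a Retryer, and WithRetry wraps its constructor as a CallOption. The snippet is a usage sketch against the gax-go v2 API shown above (which this change removes from vendor); the codes and durations are illustrative.

package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	// Retry Unavailable with an exponential, jittered backoff capped at 10s,
	// using the OnCodes/Backoff types shown in the removed call_option.go.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    250 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 2,
		})
	})
	_ = retry // passed as a CallOption to gax.Invoke(ctx, call, retry)
	fmt.Println("configured retry-on-Unavailable call option")
}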
-func (bo *Backoff) Pause() time.Duration { - if bo.Initial == 0 { - bo.Initial = time.Second - } - if bo.cur == 0 { - bo.cur = bo.Initial - } - if bo.Max == 0 { - bo.Max = 30 * time.Second - } - if bo.Multiplier < 1 { - bo.Multiplier = 2 - } - // Select a duration between 1ns and the current max. It might seem - // counterintuitive to have so much jitter, but - // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that - // that is the best strategy. - d := time.Duration(1 + rand.Int63n(int64(bo.cur))) - bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) - if bo.cur > bo.Max { - bo.cur = bo.Max - } - return d -} - -type grpcOpt []grpc.CallOption - -func (o grpcOpt) Resolve(s *CallSettings) { - s.GRPC = o -} - -// WithGRPCOptions allows passing gRPC call options during client creation. -func WithGRPCOptions(opt ...grpc.CallOption) CallOption { - return grpcOpt(append([]grpc.CallOption(nil), opt...)) -} - -// CallSettings allow fine-grained control over how calls are made. -type CallSettings struct { - // Retry returns a Retryer to be used to control retry logic of a method call. - // If Retry is nil or the returned Retryer is nil, the call will not be retried. - Retry func() Retryer - - // CallOptions to be forwarded to GRPC. - GRPC []grpc.CallOption -} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go deleted file mode 100644 index 3fd1b0b84b..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/gax.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package gax contains a set of modules which aid the development of APIs -// for clients and servers based on gRPC and Google API conventions. -// -// Application code will rarely need to use this library directly. -// However, code generated automatically from API definition files can use it -// to simplify code generation and to provide more convenient and idiomatic API surfaces. 
-package gax - -// Version specifies the gax-go version being used. -const Version = "2.0.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod deleted file mode 100644 index 9cdfaf4475..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/googleapis/gax-go/v2 - -require google.golang.org/grpc v1.19.0 diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum deleted file mode 100644 index 7fa23ecf95..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/go.sum +++ /dev/null @@ -1,25 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go deleted file mode 100644 index 139371a0bf..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018, Google Inc. -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import "bytes" - -// XGoogHeader is for use by the Google Cloud Libraries only. -// -// XGoogHeader formats key-value pairs. -// The resulting string is suitable for x-goog-api-client header. -func XGoogHeader(keyval ...string) string { - if len(keyval) == 0 { - return "" - } - if len(keyval)%2 != 0 { - panic("gax.Header: odd argument count") - } - var buf bytes.Buffer - for i := 0; i < len(keyval); i += 2 { - buf.WriteByte(' ') - buf.WriteString(keyval[i]) - buf.WriteByte('/') - buf.WriteString(keyval[i+1]) - } - return buf.String()[1:] -} diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go deleted file mode 100644 index fe31dd004e..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
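`XGoogHeader`, removed above, joins alternating keys and values as `key/value` pairs separated by single spaces, the format used for the `x-goog-api-client` header. A tiny sketch; the version strings here are made up for the example.

```go
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Keys and values alternate; the function panics on an odd count.
	h := gax.XGoogHeader("gl-go", "go1.20.1", "gax", "2.0.4")

	// Prints: gl-go/go1.20.1 gax/2.0.4
	fmt.Println(h)
}
```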
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "context" - "strings" - "time" -) - -// APICall is a user defined call stub. -type APICall func(context.Context, CallSettings) error - -// Invoke calls the given APICall, -// performing retries as specified by opts, if any. -func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { - var settings CallSettings - for _, opt := range opts { - opt.Resolve(&settings) - } - return invoke(ctx, call, settings, Sleep) -} - -// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. -// If interrupted, Sleep returns ctx.Err(). -func Sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -type sleeper func(ctx context.Context, d time.Duration) error - -// invoke implements Invoke, taking an additional sleeper argument for testing. -func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { - var retryer Retryer - for { - err := call(ctx, settings) - if err == nil { - return nil - } - if settings.Retry == nil { - return err - } - // Never retry permanent certificate errors. (e.x. if ca-certificates - // are not installed). We should only make very few, targeted - // exceptions: many (other) status=Unavailable should be retried, such - // as if there's a network hiccup, or the internet goes out for a - // minute. This is also why here we are doing string parsing instead of - // simply making Unavailable a non-retried code elsewhere. - if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { - return err - } - if retryer == nil { - if r := settings.Retry(); r != nil { - retryer = r - } else { - return err - } - } - if d, ok := retryer.Retry(err); !ok { - return err - } else if err = sp(ctx, d); err != nil { - return err - } - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 5091fb0736..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index 730c7fa51b..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -sudo: false - -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - -install: go get -v -t ./... 
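`Invoke`, shown above, re-runs an `APICall` until it succeeds, the context ends, or the `Retryer` obtained from `CallSettings.Retry` declines to retry. A hedged sketch of wiring a hand-rolled `Retryer` into `Invoke`; it assumes `gax.WithRetry` (defined elsewhere in the same package) is the call option that populates `CallSettings.Retry`, and the transient/permanent error split is purely illustrative.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

// errTransient marks failures we are willing to retry.
var errTransient = errors.New("temporarily unavailable")

// simpleRetryer retries transient errors with exponential backoff and
// gives up on everything else. It satisfies the Retryer contract used
// above: Retry(err) returns (pause, shouldRetry).
type simpleRetryer struct {
	bo gax.Backoff
}

func (r *simpleRetryer) Retry(err error) (time.Duration, bool) {
	if errors.Is(err, errTransient) {
		return r.bo.Pause(), true
	}
	return 0, false
}

func main() {
	attempts := 0
	// The APICall stands in for a generated RPC stub; it fails twice
	// before succeeding.
	call := func(ctx context.Context, _ gax.CallSettings) error {
		attempts++
		if attempts < 3 {
			return errTransient
		}
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	err := gax.Invoke(ctx, call, gax.WithRetry(func() gax.Retryer {
		return &simpleRetryer{bo: gax.Backoff{Initial: 50 * time.Millisecond}}
	}))
	fmt.Println("attempts:", attempts, "err:", err)
}
```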
-script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index a828d2848f..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,44 +0,0 @@ - -CMD = jpgo - -help: - @echo "Please use \`make <target>' where <target> is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ./... - -build: - rm -f $(CMD) - go build ./... - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: - go test -v ./... - -check: - go vet ./... - @echo "golint ./..." - @lint=`golint ./...`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 110ad79997..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -go-jmespath is a GO implementation of JMESPath, -which is a query language for JSON. It will take a JSON -document and transform it into another JSON document -through a JMESPath expression. - -Using go-jmespath is really easy. There's a single function -you use, `jmespath.Search`: - - -```go -> import "github.com/jmespath/go-jmespath" -> -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar.baz[2]", data) -result = 2 -``` - -In the example we gave the ``Search`` function input data of -`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath -expression `foo.bar.baz[2]`, and the `Search` function evaluated -the expression against the input data to produce the result ``2``. - -The JMESPath language can do a lot more than select an element -from a list. Here are a few more examples: - -```go -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar", data) -result = { "baz": [ 0, 1, 2, 3, 4 ] } - - -> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, - {"first": "c", "last": "d"}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo[*].first", data) -result = [ 'a', 'c' ] - - -> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, - {"age": 30}, {"age": 35}, - {"age": 40}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo[?age > `30`]", data) -result = [ { age: 35 }, { age: 40 } ] -``` - -You can also pre-compile your query.
This is useful if -you are going to run multiple searches with it: - -```go - > var jsondata = []byte(`{"foo": "bar"}`) - > var data interface{} - > err := json.Unmarshal(jsondata, &data) - > precompiled, err := jmespath.Compile("foo") - > if err != nil{ - > // ... handle the error - > } - > result, err := precompiled.Search(data) - result = "bar" -``` - -## More Resources - -The example above only shows a small amount of what -a JMESPath expression can do. If you want to take a -tour of the language, the *best* place to go is the -[JMESPath Tutorial](http://jmespath.org/tutorial.html). - -One of the best things about JMESPath is that it is -implemented in many different programming languages including -python, ruby, php, lua, etc. To see a complete list of libraries, -check out the [JMESPath libraries page](http://jmespath.org/libraries.html). - -And finally, the full JMESPath specification can be found -on the [JMESPath site](http://jmespath.org/specification.html). diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go deleted file mode 100644 index 010efe9bfb..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package jmespath - -import "strconv" - -// JMESPath is the representation of a compiled JMES path query. A JMESPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. -func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result.
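The api.go file removed above is the library's public surface: `Search` for one-off queries, `Compile`/`MustCompile` for reuse. A self-contained version of the compile-once pattern from the README; the JSON document is invented for the example.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/jmespath/go-jmespath"
)

// Compiled once; MustCompile panics on an invalid expression, which is
// convenient for package-level variables.
var firstQuery = jmespath.MustCompile("foo[0].first")

func main() {
	var data interface{}
	doc := []byte(`{"foo": [{"first": "a", "last": "b"}, {"first": "c", "last": "d"}]}`)
	if err := json.Unmarshal(doc, &data); err != nil {
		log.Fatal(err)
	}

	// One-off evaluation: Search parses and interprets in a single call.
	firsts, err := jmespath.Search("foo[*].first", data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(firsts) // [a c]

	// Reuse the precompiled expression; only the interpreter runs here.
	first, err := firstQuery.Search(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(first) // a
}
```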
-func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d239c9..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89b4b..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. 
- return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "amp", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - 
name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) - } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. 
- args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string. 
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod deleted file mode 100644 index aa1e3f1c9f..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/jmespath/go-jmespath - -go 1.14 - -require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum deleted file mode 100644 index 331fa69822..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c74604c2..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ 
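functions.go, deleted above, registers the JMESPath built-ins in a table keyed by function name, each entry carrying argument specs that `typeCheck` validates before the handler runs. A short sketch exercising a few of those handlers (`jpfLength`, `jpfSortBy`, `jpfMaxBy`, `jpfJoin`) through the public API; the document and the expected outputs in the comments are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	doc := []byte(`{"people": [
		{"name": "carol", "age": 40},
		{"name": "alice", "age": 30},
		{"name": "bob",   "age": 25}]}`)
	if err := json.Unmarshal(doc, &data); err != nil {
		log.Fatal(err)
	}

	// Each expression resolves to one of the handlers registered in the
	// function table above.
	for _, expr := range []string{
		"length(people)",                // 3
		"sort_by(people, &age)[0].name", // bob
		"max_by(people, &age).name",     // carol
		"join(', ', people[*].name)",    // carol, alice, bob
	} {
		result, err := jmespath.Search(expr, data)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-32s => %v\n", expr, result)
	}
}
```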
/dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". -func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) 
- } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. - rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - 
sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) 
- elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c8f5..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. - currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. 
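Because the tree interpreter above falls back to reflection (`fieldFromStruct`, `projectWithReflection` and friends) when the input is not the `map[string]interface{}`/`[]interface{}` shape produced by `json.Unmarshal`, expressions can also be evaluated directly against plain Go structs, with a lower-case field name in the expression mapped to the exported (capitalized) struct field. A hedged sketch of that path; the `Config`/`Server` types are invented for the example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmespath/go-jmespath"
)

// Plain Go structs, not map[string]interface{}: the interpreter resolves
// "servers[*].host" against them via reflection.
type Server struct {
	Host string
	Port int
}

type Config struct {
	Servers []Server
}

func main() {
	cfg := Config{Servers: []Server{
		{Host: "alpha.example.com", Port: 8080},
		{Host: "beta.example.com", Port: 9090},
	}}

	// "servers" is lower-cased in the expression; fieldFromStruct
	// upper-cases the first rune before looking up the struct field.
	hosts, err := jmespath.Search("servers[*].host", cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hosts) // [alpha.example.com beta.example.com]
}
```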
-func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token. - t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' 
{ - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tPipe, "|" -> tOr. 
-func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There's three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare rbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 4abc303ab4..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. 
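Both the lexer and the parser shown in this diff report problems as `SyntaxError` values, and `HighlightLocation` renders a caret under the offending offset. A small sketch of inspecting a failed `Compile`; the invalid expression is arbitrary, and the assumption that the returned error type-asserts directly to `jmespath.SyntaxError` follows from the code above but is not otherwise verified here.

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// "foo." is invalid: a dot must be followed by something to select.
	_, err := jmespath.Compile("foo.")
	if err == nil {
		fmt.Println("unexpectedly compiled")
		return
	}

	// The lexer and parser both return SyntaxError by value.
	if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
		fmt.Println(syntaxErr.Error())
		// Prints the expression with a "^" under the error offset.
		fmt.Println(syntaxErr.HighlightLocation())
	} else {
		fmt.Println(err)
	}
}
```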
-type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - //var right ASTNode - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, nil - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: 
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, nil - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, nil - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cbdf3..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d7d4..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string array, or hash. -// - The boolean value false. 
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
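The slice helpers being removed here follow Python slicing rules: negative indices count from the end, omitted bounds default according to the step's sign, and out-of-range bounds are capped. A minimal sketch of that capping rule (a reimplementation for illustration, not the vendored capSlice itself):

package main

import "fmt"

// clamp reproduces the capping behaviour described above for a hypothetical
// slice of the given length: negative indices count from the end, and values
// outside the slice are pinned to the nearest usable bound, which differs for
// forward and backward steps.
func clamp(length, idx, step int) int {
	if idx < 0 {
		idx += length
		if idx < 0 {
			if step < 0 {
				return -1 // stop just before index 0 when walking backwards
			}
			return 0
		}
		return idx
	}
	if idx >= length {
		if step < 0 {
			return length - 1
		}
		return length
	}
	return idx
}

func main() {
	const length = 5 // as if slicing a 5-element array
	fmt.Println(clamp(length, -1, 1))   // 4: counts from the end
	fmt.Println(clamp(length, 99, 1))   // 5: capped to length for a forward step
	fmt.Println(clamp(length, -99, -1)) // -1: capped for a backward step
}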
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum index d778b5a14d..be00a6df96 100644 --- a/vendor/github.com/json-iterator/go/go.sum +++ b/vendor/github.com/json-iterator/go/go.sum @@ -9,6 +9,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLD github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go index b9754638e8..8a3d8b6fb4 100644 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -288,6 +288,9 @@ non_decimal_loop: return iter.readFloat64SlowPath() } value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } } } return iter.readFloat64SlowPath() diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go index 2142320355..d786a89fe1 100644 --- a/vendor/github.com/json-iterator/go/iter_int.go +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -9,6 +9,7 @@ var intDigits []int8 const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 func init() { intDigits = make([]int8, 256) @@ -339,7 +340,7 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { } func (iter *Iterator) assertInteger() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + if iter.head < iter.tail && iter.buf[iter.head] == '.' 
{ iter.ReportError("assertInteger", "can not decode float as int") } } diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 74974ba74b..39acb320ac 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -65,7 +65,7 @@ func (iter *Iterator) ReadVal(obj interface{}) { decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { + if typ == nil || typ.Kind() != reflect.Ptr { iter.ReportError("ReadVal", "can only unmarshal into pointer") return } diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go index f2619936c8..eba434f2f1 100644 --- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -33,11 +33,19 @@ type jsonRawMessageCodec struct { } func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } } func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } } func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { @@ -48,11 +56,19 @@ type jsoniterRawMessageCodec struct { } func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } } func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } } func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index d7eb0eb5ca..92ae912dc2 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -1075,6 +1075,11 @@ type stringModeNumberDecoder struct { } func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + c := iter.nextToken() if c != '"' { iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65ad..b5f5e26132 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. 
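With the raw-message codec changes above, a JSON null and a nil RawMessage now round-trip cleanly instead of producing invalid output. A small sketch of the expected behaviour, assuming the patched github.com/json-iterator/go is on the module path:

package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type payload struct {
	Raw json.RawMessage `json:"raw"`
}

func main() {
	// Decoding: a JSON null becomes a nil RawMessage rather than the bytes "null".
	var p payload
	if err := jsoniter.Unmarshal([]byte(`{"raw": null}`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Raw == nil) // true

	// Encoding: a nil RawMessage is written back out as null.
	out, err := jsoniter.Marshal(payload{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"raw":null}
}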
func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go deleted file mode 100644 index 288f0e8548..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.12 - -package prometheus - -import "runtime/debug" - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. -func readBuildInfo() (path, version, sum string) { - path, version, sum = "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go deleted file mode 100644 index 6609e2877c..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.12 - -package prometheus - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before -// 1.12. Remove this whole file once the minimum supported Go version is 1.12. 
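The SkipRecursive change above runs json.Valid over the skipped bytes, so a structurally balanced but malformed value now surfaces as a lexer error instead of being silently accepted. A brief sketch against the public jlexer API (behaviour as implied by the hunk; treat it as an assumption about the patched version):

package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	// The braces are balanced, so the old code skipped this value happily;
	// the double comma makes it invalid JSON, which the added json.Valid
	// check now reports through the lexer's error state.
	l := jlexer.Lexer{Data: []byte(`{"a": [1, 2,,]}`)}
	l.SkipRecursive()
	fmt.Println(l.Error() != nil) // true with the patch above
}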
-func readBuildInfo() (path, version, sum string) { - return "unknown", "unknown", "unknown" -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 957d93a2db..4bb816ab75 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,7 +20,7 @@ import ( "strings" "github.com/cespare/xxhash/v2" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index 18a99d5faa..c41ab37f3b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -22,43 +22,10 @@ type expvarCollector struct { exports map[string]*Desc } -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. +// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. +// See there for documentation. // -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. +// Deprecated: Use collectors.NewExpvarCollector instead. 
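The doc-comment rewrites in these prometheus/client_golang hunks all point at the collectors subpackage, which is where the expvar, Go runtime, process and build-info collectors now live. A minimal registration sketch, assuming a client_golang version that ships github.com/prometheus/client_golang/prometheus/collectors:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	// Register the built-in collectors via the new package instead of the
	// deprecated prometheus.New*Collector constructors.
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewBuildInfoCollector(),
	)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println("metric families gathered:", len(mfs))
}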
func NewExpvarCollector(exports map[string]*Desc) Collector { return &expvarCollector{ exports: exports, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 6f67d10464..a96ed1cee8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -36,32 +36,10 @@ type goCollector struct { msMaxAge time.Duration // Maximum allowed age of old memstats. } -// NewGoCollector returns a collector that exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: +// NewGoCollector is the obsolete version of collectors.NewGoCollector. +// See there for documentation. // -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. -// -// NOTE: The problem is solved in Go 1.15, see -// https://github.com/golang/go/issues/19812 for the related Go issue. +// Deprecated: Use collectors.NewGoCollector instead. func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( @@ -366,25 +344,17 @@ type memStatsMetrics []struct { valType ValueType } -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random". If built without Go -// module support, all label values will be "unknown". If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". 
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. +// See there for documentation. // -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. +// Deprecated: Use collectors.NewBuildInfoCollector instead. func NewBuildInfoCollector() Collector { - path, version, sum := readBuildInfo() + path, version, sum := "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } c := &selfCollector{MustNewConstMetric( NewDesc( "go_build_info", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index f71e286be5..8425640b39 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,7 +22,7 @@ import ( "sync/atomic" "time" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -47,7 +47,12 @@ type Histogram interface { Metric Collector - // Observe adds a single observation to the histogram. + // Observe adds a single observation to the histogram. Observations are + // usually positive or zero. Negative observations are accepted but + // prevent current versions of Prometheus from properly detecting + // counter resets in the sum of observations. See + // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + // for details. Observe(float64) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index a2b80b1c19..dc121910a5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -17,7 +17,7 @@ import ( "strings" "time" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" @@ -58,7 +58,7 @@ type Metric interface { } // Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// implementation XXX has its own XXXOpts type, but in most cases, it is just // an alias of this type (which might change when the requirement arises.) // // It is mandatory to set Name to a non-empty string. All other fields are diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index c46702d60b..5bfe0ff5bb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -54,16 +54,10 @@ type ProcessCollectorOpts struct { ReportErrors bool } -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. 
The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. +// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. +// See there for documentation. // -// The collector only works on operating systems with a Linux-style proc -// filesystem and on Microsoft Windows. On other operating systems, it will not -// collect any metrics. +// Deprecated: Use collectors.NewProcessCollector instead. func NewProcessCollector(opts ProcessCollectorOpts) Collector { ns := "" if len(opts.Namespace) > 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 5070e72e28..e7c0d05464 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -83,8 +83,7 @@ type readerFromDelegator struct{ *responseWriterDelegator } type pusherDelegator struct{ *responseWriterDelegator } func (d closeNotifierDelegator) CloseNotify() <-chan bool { - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { @@ -348,8 +347,7 @@ func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) deleg } id := 0 - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. if _, ok := w.(http.CloseNotifier); ok { id += closeNotifier } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 48f5ef9d72..383a7f5941 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -26,7 +26,7 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index cf70071496..c5fa8ed7c7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -23,7 +23,7 @@ import ( "time" "github.com/beorn7/perks/quantile" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -55,7 +55,12 @@ type Summary interface { Metric Collector - // Observe adds a single observation to the summary. + // Observe adds a single observation to the summary. Observations are + // usually positive or zero. 
Negative observations are accepted but + // prevent current versions of Prometheus from properly detecting + // counter resets in the sum of observations. See + // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + // for details. Observe(float64) } @@ -121,7 +126,9 @@ type SummaryOpts struct { Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. + // for the summary. Only applies to pre-calculated quantiles, does not + // apply to _sum and _count. Must be positive. The default value is + // DefMaxAge. MaxAge time.Duration // AgeBuckets is the number of buckets used to exclude observations that diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 8304de4773..c778711b8a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,7 +19,7 @@ import ( "time" "unicode/utf8" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 6ba49d85bd..4ababe6c98 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -167,8 +167,8 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // calling the newMetric function provided during construction of the // MetricVec). // -// It is possible to call this method without using the returned Metry to only -// create the new Metric but leave it in its intitial state. +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it in its initial state. // // Keeping the Metric for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index c1b12f0847..74ee93280f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,7 +17,7 @@ import ( "fmt" "sort" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 41051a01a3..ef89563354 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -45,6 +45,14 @@ const ( // scrape a target. MetricsPathLabel = "__metrics_path__" + // ScrapeIntervalLabel is the name of the label that holds the scrape interval + // used to scrape a target. + ScrapeIntervalLabel = "__scrape_interval__" + + // ScrapeTimeoutLabel is the name of the label that holds the scrape + // timeout used to scrape a target. 
+ ScrapeTimeoutLabel = "__scrape_timeout__" + // ReservedLabelPrefix is a prefix which is not legal in user-supplied // label names. ReservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index e684ebd11a..7f67b16e42 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "regexp" @@ -202,13 +203,23 @@ func ParseDuration(durationStr string) (Duration, error) { // Parse the match at pos `pos` in the regex and use `mult` to turn that // into ms, then add that value to the total parsed duration. + var overflowErr error m := func(pos int, mult time.Duration) { if matches[pos] == "" { return } n, _ := strconv.Atoi(matches[pos]) + + // Check if the provided duration overflows time.Duration (> ~ 290years). + if n > int((1<<63-1)/mult/time.Millisecond) { + overflowErr = errors.New("duration out of range") + } d := time.Duration(n) * time.Millisecond dur += d * mult + + if dur < 0 { + overflowErr = errors.New("duration out of range") + } } m(2, 1000*60*60*24*365) // y @@ -219,7 +230,7 @@ func ParseDuration(durationStr string) (Duration, error) { m(12, 1000) // s m(14, 1) // ms - return Duration(dur), nil + return Duration(dur), overflowErr } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 9320176ca2..3ac29c636c 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.5.0 +PROMU_VERSION ?= 0.7.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := @@ -245,10 +245,12 @@ common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md new file mode 100644 index 0000000000..67741f015a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + +https://prometheus.io/docs/operating/security/ diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 916c9182a8..4e47e61720 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -36,7 +36,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, 
fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) } entries = append(entries, entry) } else { diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 10bd067a0a..f5b7939b26 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index b9fb589aa1..5623b24a16 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -19,6 +19,7 @@ import ( "bufio" "bytes" "errors" + "fmt" "regexp" "strconv" "strings" @@ -77,7 +78,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -192,7 +193,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -256,7 +257,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -281,7 +282,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, errors.New("Invalid line found in cpuinfo: " + line) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -313,6 +314,22 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { return nil, err } cpuinfo[i].CPUMHz = v + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil 
{ + return nil, err + } + cpuinfo[i].Siblings = uint(v) } } @@ -325,7 +342,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -367,7 +384,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -412,7 +429,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) diff --git a/vendor/github.com/google/cel-go/parser/gen/doc.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go similarity index 73% rename from vendor/github.com/google/cel-go/parser/gen/doc.go rename to vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index 57edd4434d..e83c2e207c 100644 --- a/vendor/github.com/google/cel-go/parser/gen/doc.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -1,10 +1,9 @@ -// Copyright 2021 Google LLC -// +// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,5 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package gen contains all of the ANTLR-generated sources used by the cel-go parser. 
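Several of the procfs hunks above move from %s to %w (and %q for paths) when formatting errors. The practical difference is that %w keeps the underlying error in the chain, so callers can still match it with errors.Is or errors.As. A short sketch of the pattern, using a hypothetical readNetARP helper rather than the real procfs code:

package main

import (
	"errors"
	"fmt"
	"os"
)

// readNetARP mimics the error handling after the change: %w wraps the
// underlying error so callers can still inspect it, and %q quotes the path.
func readNetARP(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return fmt.Errorf("error reading arp %q: %w", path, err)
	}
	return nil
}

func main() {
	err := readNetARP("/definitely/missing/net/arp")
	fmt.Println(err)
	// With %w the original error survives wrapping, so this prints true;
	// the old %s formatting would have flattened it into plain text.
	fmt.Println(errors.Is(err, os.ErrNotExist))
}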
-package gen +// +build linux +// +build riscv riscv64 + +package procfs + +var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index a958933757..5048ad1f21 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,12 +55,12 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %s: %s", path, err) + return nil, fmt.Errorf("error reading crypto %q: %w", path, err) } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %s: %s", path, err) + return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) } return crypto, nil diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 12494d7424..1e76173da0 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -111,7 +111,7 @@ Max core file size 0 unlimited bytes Max resident set unlimited unlimited bytes Max processes 62898 62898 processes Max open files 2048 4096 files -Max locked memory 65536 65536 bytes +Max locked memory 18446744073708503040 18446744073708503040 bytes Max address space 8589934592 unlimited bytes Max file locks unlimited unlimited locks Max pending signals 62898 62898 signals @@ -1080,7 +1080,6 @@ internal : yes type : skcipher async : yes blocksize : 1 -min keysize : 16 max keysize : 32 ivsize : 16 chunksize : 16 @@ -1839,6 +1838,7 @@ min keysize : 16 max keysize : 32 Mode: 444 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/diskstats Lines: 52 @@ -2129,6 +2129,24 @@ Lines: 6 4 1FB3C 0 1282A8F 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/protocols +Lines: 14 +protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em +PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n +PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n +RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n +UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n +UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n +TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y +UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n +UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n +PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n +RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n +UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n +TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y +NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/net/rpc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2186,10 +2204,25 @@ Lines: 1 00015c73 00020e76 F0000769 00000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/tcp +Lines: 4 + sl local_address rem_address st 
tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/tcp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/udp Lines: 4 sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 Mode: 644 @@ -2292,6 +2325,312 @@ Mode: 644 Path: fixtures/proc/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/slabinfo +Lines: 302 +slabinfo - version: 2.1 +# name : tunables : slabdata +pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 +pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 +nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 +kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 +kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 +kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 +pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 +x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 +iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 +ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 +bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 +bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 +fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 +fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 +squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 +fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 +fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bui_item 0 
0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 +xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0 +xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 +nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 +nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 +nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 +nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 +jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 +jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 +reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 +btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 +ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 +ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0 +ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 +ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 +ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_io_end 0 80 400 20 2 : tunables 0 0 
0 : slabdata 4 4 0 +ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 +ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 +jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 +jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 +jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 +jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 +jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 +ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 +mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 +dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 +dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 +dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 +kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 +io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 +dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 +aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 +qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 +sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 +scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 +virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 +RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 +UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 +UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 +tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 +TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 +uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 +bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 +mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 +isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 +io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 +aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 
+userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 +dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 +bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 +posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 +iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 +iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 +UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 +ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 +ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 +inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 +xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 +ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 +ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 +ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 +PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 +UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 +tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 +request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 +TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 +hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 +dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 +eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 +inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 +scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 +request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 +blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 +bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 +biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 +biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 +biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 +biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 +bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 +ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 +uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-16 1 28 576 28 4 : 
tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 +audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 +sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 +skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 +skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 +configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 +file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 +file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0 +fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 +net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 +task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 +taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 +proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 +pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 +proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 +seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 +sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 +bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 +shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 +kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 +kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 +mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 +inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 +dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 +names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 +hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 +iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 +lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 +key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 +uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 +nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 +vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 +mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 +fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 +files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 +signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 +sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 +task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 +cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 +anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 +anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 +pid 408 992 512 32 4 : tunables 0 0 0 : 
slabdata 31 31 0 +Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 +Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 +Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 +Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 +trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 +ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 +pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 +radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 +task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 +vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 +dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 +kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 +kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 +kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 +kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 +kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 +kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 +kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 +kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 +kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0 +kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0 +kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 +kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 +kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 +kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 +kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 +kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - Path: fixtures/proc/stat Lines: 16 cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 @@ -4639,6 +4978,35 @@ Mode: 644 Directory: fixtures/sys/devices/system Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node1/vmstat +Lines: 6 +nr_free_pages 1 +nr_zone_inactive_anon 2 +nr_zone_active_anon 3 +nr_zone_inactive_file 4 +nr_zone_active_file 5 +nr_zone_unevictable 6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node2/vmstat +Lines: 6 +nr_free_pages 7 +nr_zone_inactive_anon 8 +nr_zone_active_anon 9 +nr_zone_inactive_file 10 +nr_zone_active_file 11 +nr_zone_unevictable 12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/devices/system/clocksource Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index 8783cf3cc1..f8070e6e2b 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err) + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) } return *m, nil diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod index ded48253cd..ba6681f521 100644 --- a/vendor/github.com/prometheus/procfs/go.mod +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -1,9 +1,9 @@ module github.com/prometheus/procfs -go 1.12 +go 1.13 require ( - github.com/google/go-cmp v0.3.1 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e + github.com/google/go-cmp v0.5.4 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c ) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum index 54b5f33033..7ceaf56b7d 100644 --- a/vendor/github.com/prometheus/procfs/go.sum +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -1,6 +1,8 @@ -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 565e89e42c..0040753b1c 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -39,10 +39,10 @@ type FS string func NewFS(mountPoint string) (FS, error) { info, err := os.Stat(mountPoint) if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + return "", fmt.Errorf("could not read %q: %w", mountPoint, err) } if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + return "", fmt.Errorf("mount point %q is not a directory", mountPoint) } return FS(mountPoint), nil diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 00bbe14417..0cce190ec2 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes)) + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load '%s': %s", load, err) + return nil, fmt.Errorf("could not parse load %q: %w", load, err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 98e37aa8ca..4c4493bfa5 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -22,8 +22,9 @@ import ( ) var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) ) // MDStat holds info parsed from /proc/mdstat. @@ -44,6 +45,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. 
BlocksSynced int64 + // Name of md component devices + Devices []string } // MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of @@ -56,7 +59,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -82,10 +85,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf( - "error parsing %s: too few lines for md device", - mdName, - ) + return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). @@ -94,7 +94,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %s", err) + return nil, fmt.Errorf("error parsing md device lines: %w", err) } syncLineIdx := i + 2 @@ -126,7 +126,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } else { syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err) + return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) } } } @@ -140,6 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksTotal: total, BlocksTotal: size, BlocksSynced: syncedBlocks, + Devices: evalComponentDevices(deviceFields), }) } @@ -151,7 +152,7 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e sizeStr := strings.Fields(statusLine)[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -171,12 +172,12 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } return active, total, size, nil @@ -190,8 +191,23 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine) + return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) } return syncedBlocks, nil } + +func evalComponentDevices(deviceFields []string) []string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + return mdComponentDevices +} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 
50dab4bcd5..f65e174e57 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -28,9 +28,9 @@ import ( type Meminfo struct { // Total usable ram (i.e. physical ram minus a few reserved // bits and the kernel binary code) - MemTotal uint64 + MemTotal *uint64 // The sum of LowFree+HighFree - MemFree uint64 + MemFree *uint64 // An estimate of how much memory is available for starting // new applications, without swapping. Calculated from // MemFree, SReclaimable, the size of the file LRU lists, and @@ -39,59 +39,59 @@ type Meminfo struct { // well, and that not all reclaimable slab will be // reclaimable, due to items being in use. The impact of those // factors will vary from system to system. - MemAvailable uint64 + MemAvailable *uint64 // Relatively temporary storage for raw disk blocks shouldn't // get tremendously large (20MB or so) - Buffers uint64 - Cached uint64 + Buffers *uint64 + Cached *uint64 // Memory that once was swapped out, is swapped back in but // still also is in the swapfile (if memory is needed it // doesn't need to be swapped out AGAIN because it is already // in the swapfile. This saves I/O) - SwapCached uint64 + SwapCached *uint64 // Memory that has been used more recently and usually not // reclaimed unless absolutely necessary. - Active uint64 + Active *uint64 // Memory which has been less recently used. It is more // eligible to be reclaimed for other purposes - Inactive uint64 - ActiveAnon uint64 - InactiveAnon uint64 - ActiveFile uint64 - InactiveFile uint64 - Unevictable uint64 - Mlocked uint64 + Inactive *uint64 + ActiveAnon *uint64 + InactiveAnon *uint64 + ActiveFile *uint64 + InactiveFile *uint64 + Unevictable *uint64 + Mlocked *uint64 // total amount of swap space available - SwapTotal uint64 + SwapTotal *uint64 // Memory which has been evicted from RAM, and is temporarily // on the disk - SwapFree uint64 + SwapFree *uint64 // Memory which is waiting to get written back to the disk - Dirty uint64 + Dirty *uint64 // Memory which is actively being written back to the disk - Writeback uint64 + Writeback *uint64 // Non-file backed pages mapped into userspace page tables - AnonPages uint64 + AnonPages *uint64 // files which have been mapped, such as libraries - Mapped uint64 - Shmem uint64 + Mapped *uint64 + Shmem *uint64 // in-kernel data structures cache - Slab uint64 + Slab *uint64 // Part of Slab, that might be reclaimed, such as caches - SReclaimable uint64 + SReclaimable *uint64 // Part of Slab, that cannot be reclaimed on memory pressure - SUnreclaim uint64 - KernelStack uint64 + SUnreclaim *uint64 + KernelStack *uint64 // amount of memory dedicated to the lowest level of page // tables. - PageTables uint64 + PageTables *uint64 // NFS pages sent to the server, but not yet committed to // stable storage - NFSUnstable uint64 + NFSUnstable *uint64 // Memory used for block device "bounce buffers" - Bounce uint64 + Bounce *uint64 // Memory used by FUSE for temporary writeback buffers - WritebackTmp uint64 + WritebackTmp *uint64 // Based on the overcommit ratio ('vm.overcommit_ratio'), // this is the total amount of memory currently available to // be allocated on the system. This limit is only adhered to @@ -105,7 +105,7 @@ type Meminfo struct { // yield a CommitLimit of 7.3G. // For more details, see the memory overcommit documentation // in vm/overcommit-accounting. - CommitLimit uint64 + CommitLimit *uint64 // The amount of memory presently allocated on the system. 
// The committed memory is a sum of all of the memory which // has been allocated by processes, even if it has not been @@ -119,27 +119,27 @@ type Meminfo struct { // This is useful if one needs to guarantee that processes will // not fail due to lack of memory once that memory has been // successfully allocated. - CommittedAS uint64 + CommittedAS *uint64 // total size of vmalloc memory area - VmallocTotal uint64 + VmallocTotal *uint64 // amount of vmalloc area which is used - VmallocUsed uint64 + VmallocUsed *uint64 // largest contiguous block of vmalloc area which is free - VmallocChunk uint64 - HardwareCorrupted uint64 - AnonHugePages uint64 - ShmemHugePages uint64 - ShmemPmdMapped uint64 - CmaTotal uint64 - CmaFree uint64 - HugePagesTotal uint64 - HugePagesFree uint64 - HugePagesRsvd uint64 - HugePagesSurp uint64 - Hugepagesize uint64 - DirectMap4k uint64 - DirectMap2M uint64 - DirectMap1G uint64 + VmallocChunk *uint64 + HardwareCorrupted *uint64 + AnonHugePages *uint64 + ShmemHugePages *uint64 + ShmemPmdMapped *uint64 + CmaTotal *uint64 + CmaFree *uint64 + HugePagesTotal *uint64 + HugePagesFree *uint64 + HugePagesRsvd *uint64 + HugePagesSurp *uint64 + Hugepagesize *uint64 + DirectMap4k *uint64 + DirectMap2M *uint64 + DirectMap1G *uint64 } // Meminfo returns an information about current kernel/system memory statistics. @@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err) + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) } return *m, nil @@ -175,101 +175,101 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { switch fields[0] { case "MemTotal:": - m.MemTotal = v + m.MemTotal = &v case "MemFree:": - m.MemFree = v + m.MemFree = &v case "MemAvailable:": - m.MemAvailable = v + m.MemAvailable = &v case "Buffers:": - m.Buffers = v + m.Buffers = &v case "Cached:": - m.Cached = v + m.Cached = &v case "SwapCached:": - m.SwapCached = v + m.SwapCached = &v case "Active:": - m.Active = v + m.Active = &v case "Inactive:": - m.Inactive = v + m.Inactive = &v case "Active(anon):": - m.ActiveAnon = v + m.ActiveAnon = &v case "Inactive(anon):": - m.InactiveAnon = v + m.InactiveAnon = &v case "Active(file):": - m.ActiveFile = v + m.ActiveFile = &v case "Inactive(file):": - m.InactiveFile = v + m.InactiveFile = &v case "Unevictable:": - m.Unevictable = v + m.Unevictable = &v case "Mlocked:": - m.Mlocked = v + m.Mlocked = &v case "SwapTotal:": - m.SwapTotal = v + m.SwapTotal = &v case "SwapFree:": - m.SwapFree = v + m.SwapFree = &v case "Dirty:": - m.Dirty = v + m.Dirty = &v case "Writeback:": - m.Writeback = v + m.Writeback = &v case "AnonPages:": - m.AnonPages = v + m.AnonPages = &v case "Mapped:": - m.Mapped = v + m.Mapped = &v case "Shmem:": - m.Shmem = v + m.Shmem = &v case "Slab:": - m.Slab = v + m.Slab = &v case "SReclaimable:": - m.SReclaimable = v + m.SReclaimable = &v case "SUnreclaim:": - m.SUnreclaim = v + m.SUnreclaim = &v case "KernelStack:": - m.KernelStack = v + m.KernelStack = &v case "PageTables:": - m.PageTables = v + m.PageTables = &v case "NFS_Unstable:": - m.NFSUnstable = v + m.NFSUnstable = &v case "Bounce:": - m.Bounce = v + m.Bounce = &v case "WritebackTmp:": - m.WritebackTmp = v + m.WritebackTmp = &v case "CommitLimit:": - m.CommitLimit = v + m.CommitLimit = &v case "Committed_AS:": - m.CommittedAS = v + m.CommittedAS = &v case "VmallocTotal:": - m.VmallocTotal = v + m.VmallocTotal = &v case "VmallocUsed:": - m.VmallocUsed = v + 
m.VmallocUsed = &v case "VmallocChunk:": - m.VmallocChunk = v + m.VmallocChunk = &v case "HardwareCorrupted:": - m.HardwareCorrupted = v + m.HardwareCorrupted = &v case "AnonHugePages:": - m.AnonHugePages = v + m.AnonHugePages = &v case "ShmemHugePages:": - m.ShmemHugePages = v + m.ShmemHugePages = &v case "ShmemPmdMapped:": - m.ShmemPmdMapped = v + m.ShmemPmdMapped = &v case "CmaTotal:": - m.CmaTotal = v + m.CmaTotal = &v case "CmaFree:": - m.CmaFree = v + m.CmaFree = &v case "HugePages_Total:": - m.HugePagesTotal = v + m.HugePagesTotal = &v case "HugePages_Free:": - m.HugePagesFree = v + m.HugePagesFree = &v case "HugePages_Rsvd:": - m.HugePagesRsvd = v + m.HugePagesRsvd = &v case "HugePages_Surp:": - m.HugePagesSurp = v + m.HugePagesSurp = &v case "Hugepagesize:": - m.Hugepagesize = v + m.Hugepagesize = &v case "DirectMap4k:": - m.DirectMap4k = v + m.DirectMap4k = &v case "DirectMap2M:": - m.DirectMap2M = v + m.DirectMap2M = &v case "DirectMap1G:": - m.DirectMap1G = v + m.DirectMap1G = &v } } diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 861ced9da0..f7a828bb1d 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -338,12 +338,12 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e if len(ss) == 0 { break } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } switch ss[0] { case fieldOpts: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } if stats.Opts == nil { stats.Opts = map[string]string{} } @@ -356,6 +356,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } } case fieldAge: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") if err != nil { @@ -364,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { return nil, err @@ -371,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index b637be9845..9964a3600b 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -55,7 +55,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err) + return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) } return stat, nil @@ -147,7 +147,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { func parseConntrackStatField(field string) (uint64, error) { val, err := strconv.ParseUint(field, 16, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err) + return 0, fmt.Errorf("couldn't 
parse %q field: %w", field, err) } return val, err } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go new file mode 100644 index 0000000000..ac01dd8475 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -0,0 +1,220 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +// this contains generic data structures for both udp and tcp sockets +type ( + // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. + NetIPSocket []*netIPSocketLine + + // NetIPSocketSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetIPSocket it does not collect + // the parsed lines into a slice. + NetIPSocketSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netIPSocketLine represents the fields parsed from a single line + // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. + netIPSocketLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +func newNetIPSocket(file string) (NetIPSocket, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocket NetIPSocket + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields) + if err != nil { + return nil, err + } + netIPSocket = append(netIPSocket, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netIPSocket, nil +} + +// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. 
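The parseIP helper defined just below undoes the byte order the kernel uses in /proc/net/{t,u}dp{,6}: an IPv4 address is printed as eight hex digits with the least significant byte first, while ports are plain big-endian hex. A standalone sketch (not part of the patch) decoding the 0500000A:0016 entry that appears in the tcp/udp fixtures above:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	// "0500000A" is how the kernel renders 10.0.0.5: the four address bytes
	// appear least significant first, so they have to be reversed.
	raw, _ := hex.DecodeString("0500000A")
	ip := net.IP{raw[3], raw[2], raw[1], raw[0]}

	// Ports are ordinary big-endian hex: 0x0016 == 22.
	port, _ := strconv.ParseUint("0016", 16, 64)

	fmt.Printf("%s:%d\n", ip, port) // 10.0.0.5:22
}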
+func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocketSummary NetIPSocketSummary + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields) + if err != nil { + return nil, err + } + netIPSocketSummary.TxQueueLength += line.TxQueue + netIPSocketSummary.RxQueueLength += line.RxQueue + netIPSocketSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return &netIPSocketSummary, nil +} + +// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. + +func parseIP(hexIP string) (net.IP, error) { + var byteIP []byte + byteIP, err := hex.DecodeString(hexIP) + if err != nil { + return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + } + switch len(byteIP) { + case 4: + return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil + case 16: + i := net.IP{ + byteIP[3], byteIP[2], byteIP[1], byteIP[0], + byteIP[7], byteIP[6], byteIP[5], byteIP[4], + byteIP[11], byteIP[10], byteIP[9], byteIP[8], + byteIP[15], byteIP[14], byteIP[13], byteIP[12], + } + return i, nil + default: + return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + } +} + +// parseNetIPSocketLine parses a single line, represented by a list of fields. +func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { + line := &netIPSocketLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net socket line as it has less then 8 columns %q", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + } + if line.LocalAddr, err = parseIP(l[0]); err != nil { + return nil, err + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + } + if line.RemAddr, err = parseIP(r[0]); err != nil { + return nil, err + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in socket line as it has a missing colon %q", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + } + if line.RxQueue, err = 
strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go new file mode 100644 index 0000000000..8c6de3791b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -0,0 +1,180 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// NetProtocolStats stores the contents from /proc/net/protocols +type NetProtocolStats map[string]NetProtocolStatLine + +// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We +// only care about the first six columns as the rest are not likely to change +// and only serve to provide a set of capabilities for each protocol. +type NetProtocolStatLine struct { + Name string // 0 The name of the protocol + Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) + Sockets int64 // 2 Number of sockets in use by this protocol + Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol + Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. + MaxHeader uint64 // 5 Protocol specific max header size + Slab bool // 6 Indicates whether or not memory is allocated from the SLAB + ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module + Capabilities NetProtocolCapabilities +} + +// NetProtocolCapabilities contains a list of capabilities for each protocol +type NetProtocolCapabilities struct { + Close bool // 8 + Connect bool // 9 + Disconnect bool // 10 + Accept bool // 11 + IoCtl bool // 12 + Init bool // 13 + Destroy bool // 14 + Shutdown bool // 15 + SetSockOpt bool // 16 + GetSockOpt bool // 17 + SendMsg bool // 18 + RecvMsg bool // 19 + SendPage bool // 20 + Bind bool // 21 + BacklogRcv bool // 22 + Hash bool // 23 + UnHash bool // 24 + GetPort bool // 25 + EnterMemoryPressure bool // 26 +} + +// NetProtocols reads stats from /proc/net/protocols and returns a map of +// PortocolStatLine entries. 
As of this writing no official Linux Documentation +// exists, however the source is fairly self-explanatory and the format seems +// stable since its introduction in 2.6.12-rc2 +// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 +// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 +func (fs FS) NetProtocols() (NetProtocolStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) + if err != nil { + return NetProtocolStats{}, err + } + return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) +} + +func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { + nps := NetProtocolStats{} + + // Skip the header line + s.Scan() + + for s.Scan() { + line, err := nps.parseLine(s.Text()) + if err != nil { + return NetProtocolStats{}, err + } + + nps[line.Name] = *line + } + return nps, nil +} + +func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { + line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} + var err error + const enabled = "yes" + const disabled = "no" + + fields := strings.Fields(rawLine) + line.Name = fields[0] + line.Size, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.Memory, err = strconv.ParseInt(fields[3], 10, 64) + if err != nil { + return nil, err + } + if fields[4] == enabled { + line.Pressure = 1 + } else if fields[4] == disabled { + line.Pressure = 0 + } else { + line.Pressure = -1 + } + line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + if fields[6] == enabled { + line.Slab = true + } else if fields[6] == disabled { + line.Slab = false + } else { + return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + } + line.ModuleName = fields[7] + + err = line.Capabilities.parseCapabilities(fields[8:]) + if err != nil { + return nil, err + } + + return line, nil +} + +func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { + // The capabilities are all bools so we can loop over to map them + capabilityFields := [...]*bool{ + &pc.Close, + &pc.Connect, + &pc.Disconnect, + &pc.Accept, + &pc.IoCtl, + &pc.Init, + &pc.Destroy, + &pc.Shutdown, + &pc.SetSockOpt, + &pc.GetSockOpt, + &pc.SendMsg, + &pc.RecvMsg, + &pc.SendPage, + &pc.Bind, + &pc.BacklogRcv, + &pc.Hash, + &pc.UnHash, + &pc.GetPort, + &pc.EnterMemoryPressure, + } + + for i := 0; i < len(capabilities); i++ { + if capabilities[i] == "y" { + *capabilityFields[i] = true + } else if capabilities[i] == "n" { + *capabilityFields[i] = false + } else { + return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index f91ef55237..e36f4872dd 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -70,7 +70,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err) + return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) } return stat, nil @@ -90,7 +90,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // The remaining fields are 
key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err) + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index db5debdf4a..46f12c61d3 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -51,7 +51,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err) + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) } return entries, nil diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go new file mode 100644 index 0000000000..5277629557 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. + NetTCP []*netIPSocketLine + + // NetTCPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetTCP it does not collect + // the parsed lines into a slice. + NetTCPSummary NetIPSocketSummary +) + +// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp. +func (fs FS) NetTCP() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp")) +} + +// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp6. +func (fs FS) NetTCP6() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp6")) +} + +// NetTCPSummary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp. +func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp")) +} + +// NetTCP6Summary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp6. +func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp6")) +} + +// newNetTCP creates a new NetTCP{,6} from the contents of the given file. 
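The new net_tcp.go above is a thin wrapper over the shared netIPSocketLine parser. A hedged usage sketch, assuming the procfs version vendored by this patch, showing how a consumer could read both the per-connection view and the precomputed summary:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// Per-connection view: one entry per data line of /proc/net/tcp.
	conns, err := fs.NetTCP()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range conns {
		fmt.Printf("%s:%d -> %s:%d (state %#x, uid %d)\n",
			c.LocalAddr, c.LocalPort, c.RemAddr, c.RemPort, c.St, c.UID)
	}

	// Summary view: only aggregated queue lengths and the socket count.
	sum, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tcp sockets in use:", sum.UsedSockets)
}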
+func newNetTCP(file string) (NetTCP, error) { + n, err := newNetIPSocket(file) + n1 := NetTCP(n) + return n1, err +} + +func newNetTCPSummary(file string) (*NetTCPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetTCPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go index d017e3f18d..9ac3daf2d4 100644 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -13,58 +13,14 @@ package procfs -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" -) - -const ( - // readLimit is used by io.LimitReader while reading the content of the - // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic - // as each line represents a single used socket. - // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. - // With e.g. 150 Byte per line and the maximum number of 65535, - // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. - readLimit = 4294967296 // Byte -> 4 GiB -) - type ( // NetUDP represents the contents of /proc/net/udp{,6} file without the header. - NetUDP []*netUDPLine + NetUDP []*netIPSocketLine // NetUDPSummary provides already computed values like the total queue lengths or // the total number of used sockets. In contrast to NetUDP it does not collect // the parsed lines into a slice. - NetUDPSummary struct { - // TxQueueLength shows the total queue length of all parsed tx_queue lengths. - TxQueueLength uint64 - // RxQueueLength shows the total queue length of all parsed rx_queue lengths. - RxQueueLength uint64 - // UsedSockets shows the total number of parsed lines representing the - // number of used sockets. - UsedSockets uint64 - } - - // netUDPLine represents the fields parsed from a single line - // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped. - // For the proc file format details, see https://linux.die.net/man/5/proc. - netUDPLine struct { - Sl uint64 - LocalAddr net.IP - LocalPort uint64 - RemAddr net.IP - RemPort uint64 - St uint64 - TxQueue uint64 - RxQueue uint64 - UID uint64 - } + NetUDPSummary NetIPSocketSummary ) // NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams @@ -93,137 +49,16 @@ func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { // newNetUDP creates a new NetUDP{,6} from the contents of the given file. func newNetUDP(file string) (NetUDP, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - netUDP := NetUDP{} - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetUDPLine(fields) - if err != nil { - return nil, err - } - netUDP = append(netUDP, line) - } - if err := s.Err(); err != nil { - return nil, err - } - return netUDP, nil + n, err := newNetIPSocket(file) + n1 := NetUDP(n) + return n1, err } -// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file. 
func newNetUDPSummary(file string) (*NetUDPSummary, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - netUDPSummary := &NetUDPSummary{} - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetUDPLine(fields) - if err != nil { - return nil, err - } - netUDPSummary.TxQueueLength += line.TxQueue - netUDPSummary.RxQueueLength += line.RxQueue - netUDPSummary.UsedSockets++ - } - if err := s.Err(); err != nil { + n, err := newNetIPSocketSummary(file) + if n == nil { return nil, err } - return netUDPSummary, nil -} - -// parseNetUDPLine parses a single line, represented by a list of fields. -func parseNetUDPLine(fields []string) (*netUDPLine, error) { - line := &netUDPLine{} - if len(fields) < 8 { - return nil, fmt.Errorf( - "cannot parse net udp socket line as it has less then 8 columns: %s", - strings.Join(fields, " "), - ) - } - var err error // parse error - - // sl - s := strings.Split(fields[0], ":") - if len(s) != 2 { - return nil, fmt.Errorf( - "cannot parse sl field in udp socket line: %s", fields[0]) - } - - if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err) - } - // local_address - l := strings.Split(fields[1], ":") - if len(l) != 2 { - return nil, fmt.Errorf( - "cannot parse local_address field in udp socket line: %s", fields[1]) - } - if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil { - return nil, fmt.Errorf( - "cannot parse local_address value in udp socket line: %s", err) - } - if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse local_address port value in udp socket line: %s", err) - } - - // remote_address - r := strings.Split(fields[2], ":") - if len(r) != 2 { - return nil, fmt.Errorf( - "cannot parse rem_address field in udp socket line: %s", fields[1]) - } - if line.RemAddr, err = hex.DecodeString(r[0]); err != nil { - return nil, fmt.Errorf( - "cannot parse rem_address value in udp socket line: %s", err) - } - if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse rem_address port value in udp socket line: %s", err) - } - - // st - if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse st value in udp socket line: %s", err) - } - - // tx_queue and rx_queue - q := strings.Split(fields[4], ":") - if len(q) != 2 { - return nil, fmt.Errorf( - "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s", - fields[4], - ) - } - if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err) - } - if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err) - } - - // uid - if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse uid value in udp socket line: %s", err) - } - - return line, nil + n1 := NetUDPSummary(*n) + return &n1, err } diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index c55b4b18e4..98aa8e1c31 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ 
-108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err) + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err) + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) } return &nu, nil @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err) + return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err) + return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err) + return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err) + return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err) + return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 9f97b6e523..28f696803f 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -105,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) } p := Procs{} @@ -206,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + return nil, fmt.Errorf("could not parse fd %q: %w", n, err) } fds[i] = uintptr(fd) } @@ -278,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index 4abd46451c..0094a13c05 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -49,7 +49,7 @@ type Cgroup struct { func parseCgroupString(cgroupStr string) (*Cgroup, error) { var err error - fields := strings.Split(cgroupStr, ":") + fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index a76ca70791..cf63227f06 100644 --- 
a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -16,7 +16,7 @@ package procfs import ( "bufio" "bytes" - "errors" + "fmt" "regexp" "github.com/prometheus/procfs/internal/util" @@ -112,7 +112,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) { } return i, nil } - return nil, errors.New("invalid inode entry: " + line) + return nil, fmt.Errorf("invalid inode entry: %q", line) } // ProcFDInfos represents a list of ProcFDInfo structs. diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 91ee24df8b..dd20f198a3 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -26,55 +26,55 @@ import ( // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. - CPUTime int64 + CPUTime uint64 // Maximum size of files that the process may create. - FileSize int64 + FileSize uint64 // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). - DataSize int64 + DataSize uint64 // Maximum size of the process stack in bytes. - StackSize int64 + StackSize uint64 // Maximum size of a core file. - CoreFileSize int64 + CoreFileSize uint64 // Limit of the process's resident set in pages. - ResidentSet int64 + ResidentSet uint64 // Maximum number of processes that can be created for the real user ID of // the calling process. - Processes int64 + Processes uint64 // Value one greater than the maximum file descriptor number that can be // opened by this process. - OpenFiles int64 + OpenFiles uint64 // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int64 + LockedMemory uint64 // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int64 + AddressSpace uint64 // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. - FileLocks int64 + FileLocks uint64 // Limit of signals that may be queued for the real user ID of the calling // process. - PendingSignals int64 + PendingSignals uint64 // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. - MsqqueueSize int64 + MsqqueueSize uint64 // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int64 + NicePriority uint64 // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). - RealtimePriority int64 + RealtimePriority uint64 // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. - RealtimeTimeout int64 + RealtimeTimeout uint64 } const ( - limitsFields = 3 + limitsFields = 4 limitsUnlimited = "unlimited" ) var ( - limitsDelimiter = regexp.MustCompile(" +") + limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) ) // NewLimits returns the current soft limits of the process. 
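The proc_limits.go change above (continued in the next hunk) switches every ProcLimits field from int64 to uint64 and replaces the old whitespace-split parser with the limitsMatch regular expression, so "unlimited" is now reported as the maximum uint64 value (18446744073709551615) instead of -1. A minimal sketch of what a consumer of the vendored package sees after this bump; procfs.Self and math.MaxUint64 are not part of this hunk and are assumed here only for illustration:

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/procfs"
)

func main() {
	// Self resolves the Proc entry for the current process under /proc.
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}

	// Limits now returns unsigned values; the old -1 "unlimited" sentinel
	// becomes math.MaxUint64 (18446744073709551615).
	limits, err := p.Limits()
	if err != nil {
		panic(err)
	}

	if limits.OpenFiles == math.MaxUint64 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Printf("open files: %d\n", limits.OpenFiles)
	}
}
```

Callers that previously compared against -1 need the equivalent uint64 check shown here.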
@@ -96,46 +96,49 @@ func (p Proc) Limits() (ProcLimits, error) { l = ProcLimits{} s = bufio.NewScanner(f) ) + + s.Scan() // Skip limits header + for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) + //fields := limitsMatch.Split(s.Text(), limitsFields) + fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) } - switch fields[0] { + switch fields[1] { case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) + l.CPUTime, err = parseUint(fields[2]) case "Max file size": - l.FileSize, err = parseInt(fields[1]) + l.FileSize, err = parseUint(fields[2]) case "Max data size": - l.DataSize, err = parseInt(fields[1]) + l.DataSize, err = parseUint(fields[2]) case "Max stack size": - l.StackSize, err = parseInt(fields[1]) + l.StackSize, err = parseUint(fields[2]) case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) + l.CoreFileSize, err = parseUint(fields[2]) case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) + l.ResidentSet, err = parseUint(fields[2]) case "Max processes": - l.Processes, err = parseInt(fields[1]) + l.Processes, err = parseUint(fields[2]) case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) + l.OpenFiles, err = parseUint(fields[2]) case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) + l.LockedMemory, err = parseUint(fields[2]) case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) + l.AddressSpace, err = parseUint(fields[2]) case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) + l.FileLocks, err = parseUint(fields[2]) case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) + l.PendingSignals, err = parseUint(fields[2]) case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) + l.MsqqueueSize, err = parseUint(fields[2]) case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) + l.NicePriority, err = parseUint(fields[2]) case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) + l.RealtimePriority, err = parseUint(fields[2]) case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) + l.RealtimeTimeout, err = parseUint(fields[2]) } if err != nil { return ProcLimits{}, err @@ -145,13 +148,13 @@ func (p Proc) Limits() (ProcLimits, error) { return l, s.Err() } -func parseInt(s string) (int64, error) { +func parseUint(s string) (uint64, error) { if s == limitsUnlimited { - return -1, nil + return 18446744073709551615, nil } - i, err := strconv.ParseInt(s, 10, 64) + i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c66740ff74..391b4cbd11 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) 
if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index 0d7bee54ca..dc6c14f0a4 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -59,7 +59,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) } return parsePSIStats(resource, bytes.NewReader(data)) diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 4517d2e9dd..67ca0e9fbc 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -127,10 +127,7 @@ func (p Proc) Stat() (ProcStat, error) { ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) + return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) } s.Comm = string(data[l+1 : r]) diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index a4c4089ac5..28228164ef 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -95,24 +95,27 @@ func (fs FS) Schedstat() (*Schedstat, error) { return stats, nil } -func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { +func parseProcSchedstat(contents string) (ProcSchedstat, error) { + var ( + stats ProcSchedstat + err error + ) match := procLineRE.FindStringSubmatch(contents) if match != nil { stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) if err != nil { - return + return stats, err } stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) if err != nil { - return + return stats, err } stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) - return + return stats, err } - err = errors.New("could not parse schedstat") - return + return stats, errors.New("could not parse schedstat") } diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go new file mode 100644 index 0000000000..7896fd7242 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -0,0 +1,151 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + slabSpace = regexp.MustCompile(`\s+`) + slabVer = regexp.MustCompile(`slabinfo -`) + slabHeader = regexp.MustCompile(`# name`) +) + +// Slab represents a slab pool in the kernel. +type Slab struct { + Name string + ObjActive int64 + ObjNum int64 + ObjSize int64 + ObjPerSlab int64 + PagesPerSlab int64 + // tunables + Limit int64 + Batch int64 + SharedFactor int64 + SlabActive int64 + SlabNum int64 + SharedAvail int64 +} + +// SlabInfo represents info for all slabs. +type SlabInfo struct { + Slabs []*Slab +} + +func shouldParseSlab(line string) bool { + if slabVer.MatchString(line) { + return false + } + if slabHeader.MatchString(line) { + return false + } + return true +} + +// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. +func parseV21SlabEntry(line string) (*Slab, error) { + // First cleanup whitespace. + l := slabSpace.ReplaceAllString(line, " ") + s := strings.Split(l, " ") + if len(s) != 16 { + return nil, fmt.Errorf("unable to parse: %q", line) + } + var err error + i := &Slab{Name: s[0]} + i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) + if err != nil { + return nil, err + } + i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) + if err != nil { + return nil, err + } + i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) + if err != nil { + return nil, err + } + i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) + if err != nil { + return nil, err + } + i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) + if err != nil { + return nil, err + } + i.Limit, err = strconv.ParseInt(s[8], 10, 64) + if err != nil { + return nil, err + } + i.Batch, err = strconv.ParseInt(s[9], 10, 64) + if err != nil { + return nil, err + } + i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) + if err != nil { + return nil, err + } + i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) + if err != nil { + return nil, err + } + i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) + if err != nil { + return nil, err + } + i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) + if err != nil { + return nil, err + } + return i, nil +} + +// parseSlabInfo21 is used to parse a slabinfo 2.1 file. +func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { + scanner := bufio.NewScanner(r) + s := SlabInfo{Slabs: []*Slab{}} + for scanner.Scan() { + line := scanner.Text() + if !shouldParseSlab(line) { + continue + } + slab, err := parseV21SlabEntry(line) + if err != nil { + return s, err + } + s.Slabs = append(s.Slabs, slab) + } + return s, nil +} + +// SlabInfo reads data from /proc/slabinfo +func (fs FS) SlabInfo() (SlabInfo, error) { + // TODO: Consider passing options to allow for parsing different + // slabinfo versions. However, slabinfo 2.1 has been stable since + // kernel 2.6.10 and later. 
+ data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) + if err != nil { + return SlabInfo{}, err + } + + return parseSlabInfo21(bytes.NewReader(data)) +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index b2a6fc994c..6d8727541e 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) } cpuStat.User /= userHZ @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) } return softIRQStat, total, nil @@ -184,34 +184,34 @@ func (fs FS) Stat() (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + return Stat{}, 
fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) + return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go index 30aa417d53..eed07c7d77 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -112,8 +112,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf( - "couldn't parse %s line %s", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index e941503d5c..0b9bb6796b 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -74,11 +74,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md index 722ee2104c..9d7da44e7c 100644 --- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md +++ b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md @@ -40,7 +40,7 @@ At first, the FSM only contains three states, representing three possible metric / (start)---- [counter] \ - '--- [ timer ] + '--- [observer] Adding a rule `client.*.request.count` with type `counter` will make the FSM to be: @@ -50,7 +50,7 @@ Adding a rule `client.*.request.count` with type `counter` will make the FSM to / (start)---- [counter] -- [client] -- [*] -- [request] -- [count] -- {R1} \ - '--- [timer] + '--- [observer] `{R1}` is short for result 1, which is the match result for `client.*.request.count`. 
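The FSM README hunks only relabel the [timer] node as [observer]; the glob-matching walk they illustrate is unchanged. Below is a hedged sketch of that walk through the public mapper API, loading the `client.*.request.count` rule from the diagrams and resolving one metric against it; GetMapping and prometheus.NewGauge are not shown in this diff and their use here assumes their long-standing signatures:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

const rules = `
mappings:
- match: "client.*.request.count"
  name: "request_count"
  labels:
    client: "$1"
`

func main() {
	m := &mapper.MetricMapper{
		// MappingsCount is the exported gauge field on MetricMapper; a
		// throwaway gauge keeps the sketch self-contained.
		MappingsCount: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "example_mappings", Help: "number of loaded mappings",
		}),
	}
	// cacheSize 0 selects the no-op cache; with a nil Registerer the new
	// cache metrics are created but never registered anywhere.
	if err := m.InitFromYAMLString(rules, 0); err != nil {
		panic(err)
	}

	// The FSM walks counter -> [client] -> [*] -> [request] -> [count],
	// the {R1} path in the diagrams, and "aaa" is captured for $1.
	mapping, labels, ok := m.GetMapping("client.aaa.request.count", mapper.MetricTypeCounter)
	fmt.Println(ok, mapping.Name, labels["client"]) // true request_count aaa
}
```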
@@ -60,7 +60,7 @@ Adding a rule `client.*.*.size` with type `counter` will make the FSM to be: / / (start)---- [counter] -- [client] -- [*] \ \__ [*] -- [size] -- {R2} - '--- [timer] + '--- [observer] ### Finding a result state in FSM @@ -76,7 +76,7 @@ FSM, the `^1` to `^7` symbols indicate how FSM will traversal in its tree: / / ^5 ^6 ^7 (start)---- [counter] -- [client] -- [*] ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2} - '--- [timer] ^4 + '--- [observer] ^4 To map `client.bbb.request.size`, FSM will do a backtracking: @@ -86,7 +86,7 @@ To map `client.bbb.request.size`, FSM will do a backtracking: / / ^5 ^6 (start)---- [counter] -- [client] -- [*] ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2} - '--- [timer] ^4 + '--- [observer] ^4 ^7 ^8 ^9 diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go index 8aac9d1f5e..680952b3e6 100644 --- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go +++ b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go @@ -22,12 +22,13 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" - "github.com/prometheus/statsd_exporter/pkg/mapper/fsm" yaml "gopkg.in/yaml.v2" + + "github.com/prometheus/statsd_exporter/pkg/mapper/fsm" ) var ( - statsdMetricRE = `[a-zA-Z_](-?[a-zA-Z0-9_])+` + statsdMetricRE = `[a-zA-Z_](-?[a-zA-Z0-9_])*` templateReplaceRE = `(\$\{?\d+\}?)` metricLineRE = regexp.MustCompile(`^(\*\.|` + statsdMetricRE + `\.)+(\*|` + statsdMetricRE + `)$`) @@ -35,47 +36,19 @@ var ( labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`) ) -type mapperConfigDefaults struct { - TimerType TimerType `yaml:"timer_type"` - Buckets []float64 `yaml:"buckets"` - Quantiles []metricObjective `yaml:"quantiles"` - MatchType MatchType `yaml:"match_type"` - GlobDisableOrdering bool `yaml:"glob_disable_ordering"` - Ttl time.Duration `yaml:"ttl"` -} - type MetricMapper struct { - Defaults mapperConfigDefaults `yaml:"defaults"` - Mappings []MetricMapping `yaml:"mappings"` - FSM *fsm.FSM - doFSM bool - doRegex bool - cache MetricMapperCache - mutex sync.RWMutex + Registerer prometheus.Registerer + Defaults mapperConfigDefaults `yaml:"defaults"` + Mappings []MetricMapping `yaml:"mappings"` + FSM *fsm.FSM + doFSM bool + doRegex bool + cache MetricMapperCache + mutex sync.RWMutex MappingsCount prometheus.Gauge } -type MetricMapping struct { - Match string `yaml:"match"` - Name string `yaml:"name"` - nameFormatter *fsm.TemplateFormatter - regex *regexp.Regexp - Labels prometheus.Labels `yaml:"labels"` - labelKeys []string - labelFormatters []*fsm.TemplateFormatter - TimerType TimerType `yaml:"timer_type"` - LegacyBuckets []float64 `yaml:"buckets"` - LegacyQuantiles []metricObjective `yaml:"quantiles"` - MatchType MatchType `yaml:"match_type"` - HelpText string `yaml:"help"` - Action ActionType `yaml:"action"` - MatchMetricType MetricType `yaml:"match_metric_type"` - Ttl time.Duration `yaml:"ttl"` - SummaryOptions *SummaryOptions `yaml:"summary_options"` - HistogramOptions *HistogramOptions `yaml:"histogram_options"` -} - type SummaryOptions struct { Quantiles []metricObjective `yaml:"quantiles"` MaxAge time.Duration `yaml:"max_age"` @@ -105,12 +78,12 @@ func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, op return err } - if n.Defaults.Buckets == nil || len(n.Defaults.Buckets) == 0 { - n.Defaults.Buckets = prometheus.DefBuckets + if len(n.Defaults.HistogramOptions.Buckets) == 0 { + 
n.Defaults.HistogramOptions.Buckets = prometheus.DefBuckets } - if n.Defaults.Quantiles == nil || len(n.Defaults.Quantiles) == 0 { - n.Defaults.Quantiles = defaultQuantiles + if len(n.Defaults.SummaryOptions.Quantiles) == 0 { + n.Defaults.SummaryOptions.Quantiles = defaultQuantiles } if n.Defaults.MatchType == MatchTypeDefault { @@ -119,7 +92,7 @@ func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, op remainingMappingsCount := len(n.Mappings) - n.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeTimer)}, + n.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeObserver)}, remainingMappingsCount, n.Defaults.GlobDisableOrdering) for i := range n.Mappings { @@ -181,8 +154,8 @@ func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, op n.doRegex = true } - if currentMapping.TimerType == "" { - currentMapping.TimerType = n.Defaults.TimerType + if currentMapping.ObserverType == "" { + currentMapping.ObserverType = n.Defaults.ObserverType } if currentMapping.LegacyQuantiles != nil && @@ -207,9 +180,9 @@ func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, op return fmt.Errorf("cannot use buckets in both the top level and histogram options at the same time in %s", currentMapping.Match) } - if currentMapping.TimerType == TimerTypeHistogram { + if currentMapping.ObserverType == ObserverTypeHistogram { if currentMapping.SummaryOptions != nil { - return fmt.Errorf("cannot use histogram timer and summary options at the same time") + return fmt.Errorf("cannot use histogram observer and summary options at the same time") } if currentMapping.HistogramOptions == nil { currentMapping.HistogramOptions = &HistogramOptions{} @@ -218,13 +191,13 @@ func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, op currentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets } if currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 { - currentMapping.HistogramOptions.Buckets = n.Defaults.Buckets + currentMapping.HistogramOptions.Buckets = n.Defaults.HistogramOptions.Buckets } } - if currentMapping.TimerType == TimerTypeSummary { + if currentMapping.ObserverType == ObserverTypeSummary { if currentMapping.HistogramOptions != nil { - return fmt.Errorf("cannot use summary timer and histogram options at the same time") + return fmt.Errorf("cannot use summary observer and histogram options at the same time") } if currentMapping.SummaryOptions == nil { currentMapping.SummaryOptions = &SummaryOptions{} @@ -233,7 +206,16 @@ func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, op currentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles } if currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 { - currentMapping.SummaryOptions.Quantiles = n.Defaults.Quantiles + currentMapping.SummaryOptions.Quantiles = n.Defaults.SummaryOptions.Quantiles + } + if currentMapping.SummaryOptions.MaxAge == 0 { + currentMapping.SummaryOptions.MaxAge = n.Defaults.SummaryOptions.MaxAge + } + if currentMapping.SummaryOptions.AgeBuckets == 0 { + currentMapping.SummaryOptions.AgeBuckets = n.Defaults.SummaryOptions.AgeBuckets + } + if currentMapping.SummaryOptions.BufCap == 0 { + currentMapping.SummaryOptions.BufCap = n.Defaults.SummaryOptions.BufCap } } @@ -281,7 +263,7 @@ func (m *MetricMapper) InitFromFile(fileName string, cacheSize 
int, options ...C func (m *MetricMapper) InitCache(cacheSize int, options ...CacheOption) { if cacheSize == 0 { - m.cache = NewMetricMapperNoopCache() + m.cache = NewMetricMapperNoopCache(m.Registerer) } else { o := cacheOptions{ cacheType: "lru", @@ -296,9 +278,9 @@ func (m *MetricMapper) InitCache(cacheSize int, options ...CacheOption) { ) switch o.cacheType { case "lru": - cache, err = NewMetricMapperCache(cacheSize) + cache, err = NewMetricMapperCache(m.Registerer, cacheSize) case "random": - cache, err = NewMetricMapperRRCache(cacheSize) + cache, err = NewMetricMapperRRCache(m.Registerer, cacheSize) default: err = fmt.Errorf("unsupported cache type %q", o.cacheType) } diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go index 5b252a7b5b..306e54eace 100644 --- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go +++ b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go @@ -20,26 +20,41 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -var ( - cacheLength = prometheus.NewGauge( +type CacheMetrics struct { + CacheLength prometheus.Gauge + CacheGetsTotal prometheus.Counter + CacheHitsTotal prometheus.Counter +} + +func NewCacheMetrics(reg prometheus.Registerer) *CacheMetrics { + var m CacheMetrics + + m.CacheLength = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "statsd_metric_mapper_cache_length", Help: "The count of unique metrics currently cached.", }, ) - cacheGetsTotal = prometheus.NewCounter( + m.CacheGetsTotal = prometheus.NewCounter( prometheus.CounterOpts{ Name: "statsd_metric_mapper_cache_gets_total", Help: "The count of total metric cache gets.", }, ) - cacheHitsTotal = prometheus.NewCounter( + m.CacheHitsTotal = prometheus.NewCounter( prometheus.CounterOpts{ Name: "statsd_metric_mapper_cache_hits_total", Help: "The count of total metric cache hits.", }, ) -) + + if reg != nil { + reg.MustRegister(m.CacheLength) + reg.MustRegister(m.CacheGetsTotal) + reg.MustRegister(m.CacheHitsTotal) + } + return &m +} type cacheOptions struct { cacheType string @@ -67,26 +82,28 @@ type MetricMapperCache interface { type MetricMapperLRUCache struct { MetricMapperCache - cache *lru.Cache + cache *lru.Cache + metrics *CacheMetrics } type MetricMapperNoopCache struct { MetricMapperCache + metrics *CacheMetrics } -func NewMetricMapperCache(size int) (*MetricMapperLRUCache, error) { - cacheLength.Set(0) +func NewMetricMapperCache(reg prometheus.Registerer, size int) (*MetricMapperLRUCache, error) { + metrics := NewCacheMetrics(reg) cache, err := lru.New(size) if err != nil { return &MetricMapperLRUCache{}, err } - return &MetricMapperLRUCache{cache: cache}, nil + return &MetricMapperLRUCache{metrics: metrics, cache: cache}, nil } func (m *MetricMapperLRUCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { - cacheGetsTotal.Inc() + m.metrics.CacheGetsTotal.Inc() if result, ok := m.cache.Get(formatKey(metricString, metricType)); ok { - cacheHitsTotal.Inc() + m.metrics.CacheHitsTotal.Inc() return result.(*MetricMapperCacheResult), true } else { return nil, false @@ -104,16 +121,15 @@ func (m *MetricMapperLRUCache) AddMiss(metricString string, metricType MetricTyp } func (m *MetricMapperLRUCache) trackCacheLength() { - cacheLength.Set(float64(m.cache.Len())) + m.metrics.CacheLength.Set(float64(m.cache.Len())) } func formatKey(metricString string, metricType MetricType) string { return string(metricType) + 
"." + metricString } -func NewMetricMapperNoopCache() *MetricMapperNoopCache { - cacheLength.Set(0) - return &MetricMapperNoopCache{} +func NewMetricMapperNoopCache(reg prometheus.Registerer) *MetricMapperNoopCache { + return &MetricMapperNoopCache{metrics: NewCacheMetrics(reg)} } func (m *MetricMapperNoopCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { @@ -130,16 +146,18 @@ func (m *MetricMapperNoopCache) AddMiss(metricString string, metricType MetricTy type MetricMapperRRCache struct { MetricMapperCache - lock sync.RWMutex - size int - items map[string]*MetricMapperCacheResult + lock sync.RWMutex + size int + items map[string]*MetricMapperCacheResult + metrics *CacheMetrics } -func NewMetricMapperRRCache(size int) (*MetricMapperRRCache, error) { - cacheLength.Set(0) +func NewMetricMapperRRCache(reg prometheus.Registerer, size int) (*MetricMapperRRCache, error) { + metrics := NewCacheMetrics(reg) c := &MetricMapperRRCache{ - items: make(map[string]*MetricMapperCacheResult, size+1), - size: size, + items: make(map[string]*MetricMapperCacheResult, size+1), + size: size, + metrics: metrics, } return c, nil } @@ -188,11 +206,5 @@ func (m *MetricMapperRRCache) trackCacheLength() { m.lock.RLock() length := len(m.items) m.lock.RUnlock() - cacheLength.Set(float64(length)) -} - -func init() { - prometheus.MustRegister(cacheLength) - prometheus.MustRegister(cacheGetsTotal) - prometheus.MustRegister(cacheHitsTotal) + m.metrics.CacheLength.Set(float64(length)) } diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go new file mode 100644 index 0000000000..754a677b74 --- /dev/null +++ b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go @@ -0,0 +1,72 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mapper + +import "time" + +type mapperConfigDefaults struct { + ObserverType ObserverType `yaml:"observer_type"` + MatchType MatchType `yaml:"match_type"` + GlobDisableOrdering bool `yaml:"glob_disable_ordering"` + Ttl time.Duration `yaml:"ttl"` + SummaryOptions SummaryOptions `yaml:"summary_options"` + HistogramOptions HistogramOptions `yaml:"histogram_options"` +} + +// mapperConfigDefaultsAlias is used to unmarshal the yaml config into mapperConfigDefaults and allows deprecated fields +type mapperConfigDefaultsAlias struct { + ObserverType ObserverType `yaml:"observer_type"` + TimerType ObserverType `yaml:"timer_type,omitempty"` // DEPRECATED - field only present to preserve backwards compatibility in configs + Buckets []float64 `yaml:"buckets"` // DEPRECATED - field only present to preserve backwards compatibility in configs + Quantiles []metricObjective `yaml:"quantiles"` // DEPRECATED - field only present to preserve backwards compatibility in configs + MatchType MatchType `yaml:"match_type"` + GlobDisableOrdering bool `yaml:"glob_disable_ordering"` + Ttl time.Duration `yaml:"ttl"` + SummaryOptions SummaryOptions `yaml:"summary_options"` + HistogramOptions HistogramOptions `yaml:"histogram_options"` +} + +// UnmarshalYAML is a custom unmarshal function to allow use of deprecated config keys +// observer_type will override timer_type +func (d *mapperConfigDefaults) UnmarshalYAML(unmarshal func(interface{}) error) error { + var tmp mapperConfigDefaultsAlias + if err := unmarshal(&tmp); err != nil { + return err + } + + // Copy defaults + d.ObserverType = tmp.ObserverType + d.MatchType = tmp.MatchType + d.GlobDisableOrdering = tmp.GlobDisableOrdering + d.Ttl = tmp.Ttl + d.SummaryOptions = tmp.SummaryOptions + d.HistogramOptions = tmp.HistogramOptions + + // Use deprecated TimerType if necessary + if tmp.ObserverType == "" { + d.ObserverType = tmp.TimerType + } + + // Use deprecated quantiles if necessary + if len(tmp.SummaryOptions.Quantiles) == 0 && len(tmp.Quantiles) > 0 { + d.SummaryOptions = SummaryOptions{Quantiles: tmp.Quantiles} + } + + // Use deprecated buckets if necessary + if len(tmp.HistogramOptions.Buckets) == 0 && len(tmp.Buckets) > 0 { + d.HistogramOptions = HistogramOptions{Buckets: tmp.Buckets} + } + + return nil +} diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go new file mode 100644 index 0000000000..cdba27afb1 --- /dev/null +++ b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go @@ -0,0 +1,76 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either xpress or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mapper + +import ( + "regexp" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/statsd_exporter/pkg/mapper/fsm" +) + +type MetricMapping struct { + Match string `yaml:"match"` + Name string `yaml:"name"` + nameFormatter *fsm.TemplateFormatter + regex *regexp.Regexp + Labels prometheus.Labels `yaml:"labels"` + labelKeys []string + labelFormatters []*fsm.TemplateFormatter + ObserverType ObserverType `yaml:"observer_type"` + TimerType ObserverType `yaml:"timer_type,omitempty"` // DEPRECATED - field only present to preserve backwards compatibility in configs. Always empty + LegacyBuckets []float64 `yaml:"buckets"` + LegacyQuantiles []metricObjective `yaml:"quantiles"` + MatchType MatchType `yaml:"match_type"` + HelpText string `yaml:"help"` + Action ActionType `yaml:"action"` + MatchMetricType MetricType `yaml:"match_metric_type"` + Ttl time.Duration `yaml:"ttl"` + SummaryOptions *SummaryOptions `yaml:"summary_options"` + HistogramOptions *HistogramOptions `yaml:"histogram_options"` +} + +// UnmarshalYAML is a custom unmarshal function to allow use of deprecated config keys +// observer_type will override timer_type +func (m *MetricMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { + type MetricMappingAlias MetricMapping + var tmp MetricMappingAlias + if err := unmarshal(&tmp); err != nil { + return err + } + + // Copy defaults + m.Match = tmp.Match + m.Name = tmp.Name + m.Labels = tmp.Labels + m.ObserverType = tmp.ObserverType + m.LegacyBuckets = tmp.LegacyBuckets + m.LegacyQuantiles = tmp.LegacyQuantiles + m.MatchType = tmp.MatchType + m.HelpText = tmp.HelpText + m.Action = tmp.Action + m.MatchMetricType = tmp.MatchMetricType + m.Ttl = tmp.Ttl + m.SummaryOptions = tmp.SummaryOptions + m.HistogramOptions = tmp.HistogramOptions + + // Use deprecated TimerType if necessary + if tmp.ObserverType == "" { + m.ObserverType = tmp.TimerType + } + + return nil +} diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go index 0a0810f8a2..920c16ed3d 100644 --- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go +++ b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go @@ -18,9 +18,10 @@ import "fmt" type MetricType string const ( - MetricTypeCounter MetricType = "counter" - MetricTypeGauge MetricType = "gauge" - MetricTypeTimer MetricType = "timer" + MetricTypeCounter MetricType = "counter" + MetricTypeGauge MetricType = "gauge" + MetricTypeObserver MetricType = "observer" + MetricTypeTimer MetricType = "timer" // DEPRECATED ) func (m *MetricType) UnmarshalYAML(unmarshal func(interface{}) error) error { @@ -34,8 +35,10 @@ func (m *MetricType) UnmarshalYAML(unmarshal func(interface{}) error) error { *m = MetricTypeCounter case MetricTypeGauge: *m = MetricTypeGauge + case MetricTypeObserver: + *m = MetricTypeObserver case MetricTypeTimer: - *m = MetricTypeTimer + *m = MetricTypeObserver default: return fmt.Errorf("invalid metric type '%s'", v) } diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/timer.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go similarity index 61% rename from vendor/github.com/prometheus/statsd_exporter/pkg/mapper/timer.go rename to vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go index f1d2fb70b8..3d5da7eabc 100644 --- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/timer.go +++ 
b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go @@ -15,27 +15,27 @@ package mapper import "fmt" -type TimerType string +type ObserverType string const ( - TimerTypeHistogram TimerType = "histogram" - TimerTypeSummary TimerType = "summary" - TimerTypeDefault TimerType = "" + ObserverTypeHistogram ObserverType = "histogram" + ObserverTypeSummary ObserverType = "summary" + ObserverTypeDefault ObserverType = "" ) -func (t *TimerType) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *ObserverType) UnmarshalYAML(unmarshal func(interface{}) error) error { var v string if err := unmarshal(&v); err != nil { return err } - switch TimerType(v) { - case TimerTypeHistogram: - *t = TimerTypeHistogram - case TimerTypeSummary, TimerTypeDefault: - *t = TimerTypeSummary + switch ObserverType(v) { + case ObserverTypeHistogram: + *t = ObserverTypeHistogram + case ObserverTypeSummary, ObserverTypeDefault: + *t = ObserverTypeSummary default: - return fmt.Errorf("invalid timer type '%s'", v) + return fmt.Errorf("invalid observer type '%s'", v) } return nil } diff --git a/vendor/github.com/stoewer/go-strcase/.gitignore b/vendor/github.com/stoewer/go-strcase/.gitignore deleted file mode 100644 index db5247b944..0000000000 --- a/vendor/github.com/stoewer/go-strcase/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -vendor -doc - -# Temporary files -*~ -*.swp - -# Editor and IDE config -.idea -*.iml -.vscode diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml deleted file mode 100644 index 7f98d55c42..0000000000 --- a/vendor/github.com/stoewer/go-strcase/.golangci.yml +++ /dev/null @@ -1,26 +0,0 @@ -run: - deadline: 10m - -linters: - enable: - - dupl - - goconst - - gocyclo - - godox - - gosec - - interfacer - - lll - - maligned - - misspell - - prealloc - - stylecheck - - unconvert - - unparam - - errcheck - - golint - - gofmt - disable: [] - fast: false - -issues: - exclude-use-default: false diff --git a/vendor/github.com/stoewer/go-strcase/LICENSE b/vendor/github.com/stoewer/go-strcase/LICENSE deleted file mode 100644 index a105a3819a..0000000000 --- a/vendor/github.com/stoewer/go-strcase/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017, Adrian Stoewer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
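The observer.go rename above, together with the UnmarshalYAML shims added in mapper_defaults.go and mapping.go, keeps the deprecated timer_type keys working: observer_type wins when both are present, otherwise the legacy value is copied over. A sketch of that fallback under the same assumptions as the previous example (only fields visible in this diff are read back; the expected output is inferred from the unmarshal hooks shown above):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

// Both timer_type keys below are the deprecated spelling; the custom
// unmarshallers added in this diff copy them into the observer_type fields.
const legacyRules = `
defaults:
  timer_type: histogram
mappings:
- match: "api.*.latency"
  timer_type: summary
  name: "api_latency"
  labels:
    endpoint: "$1"
`

func main() {
	m := &mapper.MetricMapper{
		MappingsCount: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "example_mappings", Help: "number of loaded mappings",
		}),
	}
	if err := m.InitFromYAMLString(legacyRules, 0); err != nil {
		panic(err)
	}

	// The per-mapping timer_type overrides the default and surfaces on the
	// new ObserverType field; the deprecated TimerType field stays empty.
	fmt.Println(m.Defaults.ObserverType, m.Mappings[0].ObserverType) // histogram summary
}
```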
diff --git a/vendor/github.com/stoewer/go-strcase/README.md b/vendor/github.com/stoewer/go-strcase/README.md deleted file mode 100644 index 0e8635d801..0000000000 --- a/vendor/github.com/stoewer/go-strcase/README.md +++ /dev/null @@ -1,50 +0,0 @@ -[![CircleCI](https://circleci.com/gh/stoewer/go-strcase/tree/master.svg?style=svg)](https://circleci.com/gh/stoewer/go-strcase/tree/master) -[![codecov](https://codecov.io/gh/stoewer/go-strcase/branch/master/graph/badge.svg)](https://codecov.io/gh/stoewer/go-strcase) -[![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase) ---- - -Go strcase -========== - -The package `strcase` converts between different kinds of naming formats such as camel case -(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`). -The package is designed to work only with strings consisting of standard ASCII letters. -Unicode is currently not supported. - -Versioning and stability ------------------------- - -Although the master branch is supposed to remain always backward compatible, the repository -contains version tags in order to support vendoring tools. -The tag names follow semantic versioning conventions and have the following format `v1.0.0`. -This package supports Go modules introduced with version 1.11. - -Example -------- - -```go -import "github.com/stoewer/go-strcase" - -var snake = strcase.SnakeCase("CamelCase") -``` - -Dependencies ------------- - -### Build dependencies - -* none - -### Test dependencies - -* `github.com/stretchr/testify` - -Run linters and unit tests --------------------------- - -To run the static code analysis, linters and tests use the following commands: - -``` -golangci-lint run --config .golangci.yml ./... -go test ./... -``` diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go deleted file mode 100644 index 5c233cc8f1..0000000000 --- a/vendor/github.com/stoewer/go-strcase/camel.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -import ( - "strings" -) - -// UpperCamelCase converts a string into camel case starting with a upper case letter. -func UpperCamelCase(s string) string { - return camelCase(s, true) -} - -// LowerCamelCase converts a string into camel case starting with a lower case letter. -func LowerCamelCase(s string) string { - return camelCase(s, false) -} - -func camelCase(s string, upper bool) string { - s = strings.TrimSpace(s) - buffer := make([]rune, 0, len(s)) - - stringIter(s, func(prev, curr, next rune) { - if !isDelimiter(curr) { - if isDelimiter(prev) || (upper && prev == 0) { - buffer = append(buffer, toUpper(curr)) - } else if isLower(prev) { - buffer = append(buffer, curr) - } else { - buffer = append(buffer, toLower(curr)) - } - } - }) - - return string(buffer) -} diff --git a/vendor/github.com/stoewer/go-strcase/doc.go b/vendor/github.com/stoewer/go-strcase/doc.go deleted file mode 100644 index 3e441ca3ef..0000000000 --- a/vendor/github.com/stoewer/go-strcase/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -// Package strcase converts between different kinds of naming formats such as camel case -// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed -// to work only with strings consisting of standard ASCII letters. Unicode is currently not -// supported. 
-package strcase diff --git a/vendor/github.com/stoewer/go-strcase/go.mod b/vendor/github.com/stoewer/go-strcase/go.mod deleted file mode 100644 index 8a360abe14..0000000000 --- a/vendor/github.com/stoewer/go-strcase/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/stoewer/go-strcase - -go 1.11 - -require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/stoewer/go-strcase/go.sum b/vendor/github.com/stoewer/go-strcase/go.sum deleted file mode 100644 index 331fa69822..0000000000 --- a/vendor/github.com/stoewer/go-strcase/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go deleted file mode 100644 index ecad589143..0000000000 --- a/vendor/github.com/stoewer/go-strcase/helper.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -// isLower checks if a character is lower case. More precisely it evaluates if it is -// in the range of ASCII character 'a' to 'z'. -func isLower(ch rune) bool { - return ch >= 'a' && ch <= 'z' -} - -// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower -// case counterpart. Other characters remain the same. -func toLower(ch rune) rune { - if ch >= 'A' && ch <= 'Z' { - return ch + 32 - } - return ch -} - -// isLower checks if a character is upper case. More precisely it evaluates if it is -// in the range of ASCII characters 'A' to 'Z'. -func isUpper(ch rune) bool { - return ch >= 'A' && ch <= 'Z' -} - -// toLower converts a character in the range of ASCII characters 'a' to 'z' to its lower -// case counterpart. Other characters remain the same. -func toUpper(ch rune) rune { - if ch >= 'a' && ch <= 'z' { - return ch - 32 - } - return ch -} - -// isSpace checks if a character is some kind of whitespace. -func isSpace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// isDelimiter checks if a character is some kind of whitespace or '_' or '-'. -func isDelimiter(ch rune) bool { - return ch == '-' || ch == '_' || isSpace(ch) -} - -// iterFunc is a callback that is called fro a specific position in a string. Its arguments are the -// rune at the respective string position as well as the previous and the next rune. If curr is at the -// first position of the string prev is zero. If curr is at the end of the string next is zero. -type iterFunc func(prev, curr, next rune) - -// stringIter iterates over a string, invoking the callback for every single rune in the string. 
-func stringIter(s string, callback iterFunc) { - var prev rune - var curr rune - for _, next := range s { - if curr == 0 { - prev = curr - curr = next - continue - } - - callback(prev, curr, next) - - prev = curr - curr = next - } - - if len(s) > 0 { - callback(prev, curr, 0) - } -} diff --git a/vendor/github.com/stoewer/go-strcase/kebab.go b/vendor/github.com/stoewer/go-strcase/kebab.go deleted file mode 100644 index e9a6487579..0000000000 --- a/vendor/github.com/stoewer/go-strcase/kebab.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -// KebabCase converts a string into kebab case. -func KebabCase(s string) string { - return delimiterCase(s, '-', false) -} - -// UpperKebabCase converts a string into kebab case with capital letters. -func UpperKebabCase(s string) string { - return delimiterCase(s, '-', true) -} diff --git a/vendor/github.com/stoewer/go-strcase/snake.go b/vendor/github.com/stoewer/go-strcase/snake.go deleted file mode 100644 index 1b216e20cf..0000000000 --- a/vendor/github.com/stoewer/go-strcase/snake.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -import ( - "strings" -) - -// SnakeCase converts a string into snake case. -func SnakeCase(s string) string { - return delimiterCase(s, '_', false) -} - -// UpperSnakeCase converts a string into snake case with capital letters. -func UpperSnakeCase(s string) string { - return delimiterCase(s, '_', true) -} - -// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed -// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE. -func delimiterCase(s string, delimiter rune, upperCase bool) string { - s = strings.TrimSpace(s) - buffer := make([]rune, 0, len(s)+3) - - adjustCase := toLower - if upperCase { - adjustCase = toUpper - } - - var prev rune - var curr rune - for _, next := range s { - if isDelimiter(curr) { - if !isDelimiter(prev) { - buffer = append(buffer, delimiter) - } - } else if isUpper(curr) { - if isLower(prev) || (isUpper(prev) && isLower(next)) { - buffer = append(buffer, delimiter) - } - buffer = append(buffer, adjustCase(curr)) - } else if curr != 0 { - buffer = append(buffer, adjustCase(curr)) - } - prev = curr - curr = next - } - - if len(s) > 0 { - if isUpper(curr) && isLower(prev) && prev != 0 { - buffer = append(buffer, delimiter) - } - buffer = append(buffer, adjustCase(curr)) - } - - return string(buffer) -} diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/contexts.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/contexts/contexts.go similarity index 98% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/contexts.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/contexts/contexts.go index ea4917ae60..72c25971e4 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/contexts.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/contexts/contexts.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
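For readers unfamiliar with the dependency being dropped from vendor/ here: the deleted github.com/stoewer/go-strcase files above expose four small case-conversion helpers. A minimal usage sketch (outputs derived from the deleted delimiterCase logic; the input string is illustrative):

```go
package main

import (
	"fmt"

	strcase "github.com/stoewer/go-strcase" // vendored copy removed by this change
)

func main() {
	// Exported helpers from the deleted kebab.go and snake.go.
	fmt.Println(strcase.SnakeCase("TektonTriggers"))      // tekton_triggers
	fmt.Println(strcase.UpperSnakeCase("TektonTriggers")) // TEKTON_TRIGGERS
	fmt.Println(strcase.KebabCase("TektonTriggers"))      // tekton-triggers
	fmt.Println(strcase.UpperKebabCase("TektonTriggers")) // TEKTON-TRIGGERS
}
```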
*/ -package v1alpha1 +package contexts import "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/register.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/register.go new file mode 100644 index 0000000000..e297afe668 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/register.go @@ -0,0 +1,15 @@ +package triggers + +const ( + // GroupName is the Kubernetes resource group name for Tekton types. + GroupName = "triggers.tekton.dev" + + // EventListenerLabelKey is used as the label identifier for an EventListener. + EventListenerLabelKey = "/eventlistener" + + // EventIDLabelKey is used as the label identifier for an EventListener event. + EventIDLabelKey = "/triggers-eventid" + + // TriggerLabelKey is used as the label identifier for a Trigger + TriggerLabelKey = "/trigger" +) diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_types.go deleted file mode 100644 index 6e6f86ff71..0000000000 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_types.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2021 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "errors" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" - duckv1 "knative.dev/pkg/apis/duck/v1" -) - -// Check that EventListener may be validated and defaulted. -var _ apis.Validatable = (*ClusterInterceptor)(nil) -var _ apis.Defaultable = (*ClusterInterceptor)(nil) - -// +genclient -// +genclient:nonNamespaced -// +genreconciler:krshapedlogic=false -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -// ClusterInterceptor describes a pluggable interceptor including configuration -// such as the fields it accepts and its deployment address. 
The type is based on -// the Validating/MutatingWebhookConfiguration types for configuring AdmissionWebhooks -type ClusterInterceptor struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ClusterInterceptorSpec `json:"spec"` - // +optional - Status ClusterInterceptorStatus `json:"status"` -} - -// ClusterInterceptorSpec describes the Spec for an ClusterInterceptor -type ClusterInterceptorSpec struct { - ClientConfig ClientConfig `json:"clientConfig"` -} - -// ClusterInterceptorStatus holds the status of the ClusterInterceptor -// +k8s:deepcopy-gen=true -type ClusterInterceptorStatus struct { - duckv1.Status `json:",inline"` - - // ClusterInterceptor is Addressable and exposes the URL where the Interceptor is running - duckv1.AddressStatus `json:",inline"` -} - -// ClientConfig describes how a client can communicate with the Interceptor -type ClientConfig struct { - // URL is a fully formed URL pointing to the interceptor - // Mutually exclusive with Service - URL *apis.URL `json:"url,omitempty"` - - // Service is a reference to a Service object where the interceptor is running - // Mutually exclusive with URL - Service *ServiceReference `json:"service,omitempty"` -} - -var defaultPort = int32(80) - -// ServiceReference is a reference to a Service object -// with an optional path -type ServiceReference struct { - // Name is the name of the service - Name string `json:"name"` - - // Namespace is the namespace of the service - Namespace string `json:"namespace"` - - // Path is an optional URL path - // +optional - Path string `json:"path,omitempty"` - - // Port is a valid port number - Port *int32 `json:"port,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterInterceptorList contains a list of ClusterInterceptor -// We don't use this but it's required for certain codegen features. -type ClusterInterceptorList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []ClusterInterceptor `json:"items"` -} - -var ErrNilURL = errors.New("interceptor URL was nil") - -// ResolveAddress returns the URL where the interceptor is running using its clientConfig -func (it *ClusterInterceptor) ResolveAddress() (*apis.URL, error) { - if url := it.Spec.ClientConfig.URL; url != nil { - return url, nil - } - svc := it.Spec.ClientConfig.Service - if svc == nil { - return nil, ErrNilURL - } - port := defaultPort - if svc.Port != nil { - port = *svc.Port - } - url := &apis.URL{ - Scheme: "http", // TODO: Support HTTPs if caBundle is present - Host: fmt.Sprintf("%s.%s.svc:%d", svc.Name, svc.Namespace, port), - Path: svc.Path, - } - return url, nil -} diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_validation.go deleted file mode 100644 index f8bf39e99d..0000000000 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_validation.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2021 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
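The ClusterInterceptor type deleted from the vendored v1alpha1 package above resolves its address from a clientConfig: an explicit URL wins, otherwise the service reference becomes http://<name>.<namespace>.svc:<port>, with the port defaulting to 80. A self-contained sketch of that resolution (the service names are hypothetical; it mirrors the removed ResolveAddress rather than importing it):

```go
package main

import "fmt"

// resolveAddress mirrors the deleted ClusterInterceptor.ResolveAddress logic:
// service-based clientConfigs become http://<name>.<namespace>.svc:<port>,
// and a nil port falls back to the deleted defaultPort of 80.
func resolveAddress(name, namespace string, port *int32) string {
	p := int32(80)
	if port != nil {
		p = *port
	}
	return fmt.Sprintf("http://%s.%s.svc:%d", name, namespace, p)
}

func main() {
	port := int32(8082)
	fmt.Println(resolveAddress("github-interceptor", "tekton-pipelines", &port))
	// http://github-interceptor.tekton-pipelines.svc:8082
	fmt.Println(resolveAddress("cel-interceptor", "tekton-pipelines", nil))
	// http://cel-interceptor.tekton-pipelines.svc:80
}
```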
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - - "knative.dev/pkg/apis" -) - -// Validate ClusterInterceptor -func (it *ClusterInterceptor) Validate(ctx context.Context) *apis.FieldError { - if apis.IsInDelete(ctx) { - return nil - } - return it.Spec.validate(ctx) -} - -func (s *ClusterInterceptorSpec) validate(ctx context.Context) (errs *apis.FieldError) { - if s.ClientConfig.URL != nil && s.ClientConfig.Service != nil { - errs = errs.Also(apis.ErrMultipleOneOf("spec.clientConfig.url", "spec.clientConfig.service")) - } - if svc := s.ClientConfig.Service; svc != nil { - if svc.Namespace == "" { - errs = errs.Also(apis.ErrMissingField("spec.clientConfig.service.namespace")) - } - if svc.Name == "" { - errs = errs.Also(apis.ErrMissingField("spec.clientConfig.service.name")) - } - } - return errs -} diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_defaults.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_defaults.go deleted file mode 100644 index b055fcabe5..0000000000 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_defaults.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - - "knative.dev/pkg/logging" - "knative.dev/pkg/ptr" -) - -// SetDefaults sets the defaults on the object. 
-func (el *EventListener) SetDefaults(ctx context.Context) { - if IsUpgradeViaDefaulting(ctx) { - // set defaults - if el.Spec.Resources.KubernetesResource != nil { - if el.Spec.Resources.KubernetesResource.Replicas != nil && *el.Spec.Resources.KubernetesResource.Replicas == 0 { - *el.Spec.Resources.KubernetesResource.Replicas = 1 - } - } - - for i, t := range el.Spec.Triggers { - triggerSpecBindingArray(el.Spec.Triggers[i].Bindings).defaultBindings() - for _, ti := range t.Interceptors { - ti.defaultInterceptorKind() - if err := ti.updateCoreInterceptors(); err != nil { - // The err only happens due to malformed JSON and should never really happen - // We can't return an error here, so print out the error - logger := logging.FromContext(ctx) - logger.Errorf("failed to setDefaults for trigger: %s; err: %s", t.Name, err) - } - } - } - el.Spec.updatePodTemplate() - // To be removed in a later release #1020 - el.Spec.updateReplicas() - } -} - -func (spec *EventListenerSpec) updatePodTemplate() { - if spec.DeprecatedPodTemplate != nil { - if spec.DeprecatedPodTemplate.NodeSelector != nil { - if spec.Resources.KubernetesResource == nil { - spec.Resources.KubernetesResource = &KubernetesResource{} - } - spec.Resources.KubernetesResource.Template.Spec.NodeSelector = spec.DeprecatedPodTemplate.NodeSelector - spec.DeprecatedPodTemplate.NodeSelector = nil - } - if spec.DeprecatedPodTemplate.Tolerations != nil { - if spec.Resources.KubernetesResource == nil { - spec.Resources.KubernetesResource = &KubernetesResource{} - } - spec.Resources.KubernetesResource.Template.Spec.Tolerations = spec.DeprecatedPodTemplate.Tolerations - spec.DeprecatedPodTemplate.Tolerations = nil - } - spec.DeprecatedPodTemplate = nil - } -} - -// To be Removed in a later release #1020 -func (spec *EventListenerSpec) updateReplicas() { - if spec.DeprecatedReplicas != nil { - if *spec.DeprecatedReplicas == 0 { - if spec.Resources.KubernetesResource == nil { - spec.Resources.KubernetesResource = &KubernetesResource{} - } - spec.Resources.KubernetesResource.Replicas = ptr.Int32(1) - spec.DeprecatedReplicas = nil - } else if *spec.DeprecatedReplicas > 0 { - if spec.Resources.KubernetesResource == nil { - spec.Resources.KubernetesResource = &KubernetesResource{} - } - spec.Resources.KubernetesResource.Replicas = spec.DeprecatedReplicas - spec.DeprecatedReplicas = nil - } - } -} diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_defaults.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_defaults.go similarity index 93% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_defaults.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_defaults.go index eae67beee3..9d1badc009 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_defaults.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_types.go similarity index 97% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_types.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_types.go index 10b9d5a480..c9ad3a5286 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_validation.go similarity index 94% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_validation.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_validation.go index 0c410a3517..d892ffd857 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_validation.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/doc.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/doc.go similarity index 84% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/doc.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/doc.go index e4c24171be..933741aa31 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/doc.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package v1alpha1 contains API Schema definitions for the triggers v1alpha1 API group +// package v1beta1 contains API Schema definitions for the triggers v1beta1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/tektoncd/triggers/pkg/apis/triggers // +k8s:defaulter-gen=TypeMeta // +groupName=triggers.tekton.dev -package v1alpha1 +package v1beta1 diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_defaults.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_defaults.go new file mode 100644 index 0000000000..71fbf7e500 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_defaults.go @@ -0,0 +1,42 @@ +/* +Copyright 2021 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + + "github.com/tektoncd/triggers/pkg/apis/triggers/contexts" +) + +// SetDefaults sets the defaults on the object. +func (el *EventListener) SetDefaults(ctx context.Context) { + if contexts.IsUpgradeViaDefaulting(ctx) { + // set defaults + if el.Spec.Resources.KubernetesResource != nil { + if el.Spec.Resources.KubernetesResource.Replicas != nil && *el.Spec.Resources.KubernetesResource.Replicas == 0 { + *el.Spec.Resources.KubernetesResource.Replicas = 1 + } + } + + for i, t := range el.Spec.Triggers { + triggerSpecBindingArray(el.Spec.Triggers[i].Bindings).defaultBindings() + for _, ti := range t.Interceptors { + ti.defaultInterceptorKind() + } + } + } +} diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_types.go similarity index 94% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_types.go index 6b4180cb2b..65ba5e1a30 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
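The new v1beta1 event_listener_defaults.go above is much smaller than the deleted v1alpha1 version: it no longer migrates the removed podTemplate/replicas fields, only bumps an explicit replicas of 0 to 1 and defaults each interceptor's kind, and it acts only when the context is flagged for upgrade-via-defaulting. A hedged usage sketch, assuming the renamed contexts package exposes a WithUpgradeViaDefaulting counterpart to the IsUpgradeViaDefaulting check used above and that the field shapes match this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/triggers/pkg/apis/triggers/contexts"
	"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1"
	"knative.dev/pkg/ptr"
)

func main() {
	el := &v1beta1.EventListener{
		Spec: v1beta1.EventListenerSpec{
			Resources: v1beta1.Resources{
				KubernetesResource: &v1beta1.KubernetesResource{
					Replicas: ptr.Int32(0), // an explicit 0 is treated as "unset"
				},
			},
		},
	}

	// SetDefaults is a no-op unless the context is marked for upgrade-via-defaulting.
	ctx := contexts.WithUpgradeViaDefaulting(context.Background()) // assumed helper in the contexts package
	el.SetDefaults(ctx)

	fmt.Println(*el.Spec.Resources.KubernetesResource.Replicas) // 1
}
```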
*/ -package v1alpha1 +package v1beta1 import ( "fmt" @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" - duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" "knative.dev/pkg/apis/duck/v1beta1" ) @@ -57,12 +56,9 @@ type EventListener struct { type EventListenerSpec struct { ServiceAccountName string `json:"serviceAccountName,omitempty"` Triggers []EventListenerTrigger `json:"triggers"` - // To be removed in a later release #1020 - DeprecatedReplicas *int32 `json:"replicas,omitempty"` - DeprecatedPodTemplate *PodTemplate `json:"podTemplate,omitempty"` - NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` - Resources Resources `json:"resources,omitempty"` + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + Resources Resources `json:"resources,omitempty"` } type Resources struct { @@ -147,7 +143,7 @@ type EventListenerStatus struct { // EventListener is Addressable. It currently exposes the service DNS // address of the the EventListener sink - duckv1alpha1.AddressStatus `json:",inline"` + v1beta1.AddressStatus `json:",inline"` // Configuration stores configuration for the EventListener service Configuration EventListenerConfig `json:"configuration"` @@ -318,7 +314,7 @@ func (el *EventListener) GetOwnerReference() *metav1.OwnerReference { // SetAddress sets the address (as part of Addressable contract) and marks the correct condition. func (els *EventListenerStatus) SetAddress(hostname string) { if els.Address == nil { - els.Address = &duckv1alpha1.Addressable{} + els.Address = &v1beta1.Addressable{} } if hostname != "" { els.Address.URL = &apis.URL{ diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_validation.go similarity index 97% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_validation.go index 79b80dcbd3..e818ac559e 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
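The EventListener status in the renamed v1beta1 types above drops the deprecated knative duck v1alpha1 AddressStatus in favour of the duck v1beta1 one; the addressable shape is still just a URL wrapper. A small sketch of what the status now embeds (the hostname is hypothetical):

```go
package main

import (
	"fmt"

	"knative.dev/pkg/apis"
	duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
)

func main() {
	// What EventListenerStatus now embeds via duck typing: an AddressStatus
	// whose Addressable carries the sink URL, here built with apis.HTTP.
	addr := duckv1beta1.AddressStatus{
		Address: &duckv1beta1.Addressable{
			URL: apis.HTTP("el-example.default.svc.cluster.local"), // hypothetical sink host
		},
	}
	fmt.Println(addr.Address.URL) // http://el-example.default.svc.cluster.local
}
```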
*/ -package v1alpha1 +package v1beta1 import ( "bytes" @@ -54,13 +54,6 @@ func (s *EventListenerSpec) validate(ctx context.Context) (errs *apis.FieldError errs = errs.Also(trigger.validate(ctx).ViaField(fmt.Sprintf("spec.triggers[%d]", i))) } - // To be removed in a later release #1020 - if s.DeprecatedReplicas != nil { - if *s.DeprecatedReplicas < 0 { - errs = errs.Also(apis.ErrInvalidValue(*s.DeprecatedReplicas, "spec.replicas")) - } - } - // Both Kubernetes and Custom resource can't be present at the same time if s.Resources.KubernetesResource != nil && s.Resources.CustomResource != nil { return apis.ErrMultipleOneOf("spec.resources.kubernetesResource", "spec.resources.customResource") diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/interceptor_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/interceptor_types.go similarity index 99% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/interceptor_types.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/interceptor_types.go index a87a53e0dc..b5b739d925 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/interceptor_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/interceptor_types.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/param.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/param.go similarity index 97% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/param.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/param.go index 4fae706dfe..b026c50272 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/param.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/param.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1beta1 // ParamSpec defines an arbitrary named input whose value can be supplied by a // `Param`. diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/register.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/register.go similarity index 72% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/register.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/register.go index 40dbb5661b..d44525d0d8 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/register.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,30 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( + "github.com/tektoncd/triggers/pkg/apis/triggers" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -const ( - // GroupName is the Kubernetes resource group name for Tekton types. - GroupName = "triggers.tekton.dev" - - // EventListenerLabelKey is used as the label identifier for an EventListener. - EventListenerLabelKey = "/eventlistener" - - // EventIDLabelKey is used as the label identifier for an EventListener event. 
- EventIDLabelKey = "/triggers-eventid" - - // TriggerLabelKey is used as the label identifier for a Trigger - TriggerLabelKey = "/trigger" -) - // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: triggers.GroupName, Version: "v1beta1"} // Kind takes an unqualified kind and returns back a Group qualified GroupKind func Kind(kind string) schema.GroupKind { @@ -59,8 +46,6 @@ var ( // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ClusterInterceptor{}, - &ClusterInterceptorList{}, &ClusterTriggerBinding{}, &ClusterTriggerBindingList{}, &EventListener{}, diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_defaults.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_defaults.go similarity index 93% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_defaults.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_defaults.go index 5ed3a3a847..1bd52d80fe 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_defaults.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_interface.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_interface.go similarity index 94% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_interface.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_interface.go index c7ab5fd901..f147a41b98 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_interface.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_interface.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
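The v1beta1 register.go above now builds its scheme group version from the shared triggers.GroupName constant introduced in the new triggers/register.go earlier in this diff, which is also where the slash-prefixed label key suffixes moved. A short sketch of how the pieces compose (joining a label key onto the group name is an assumption based on how such keys are normally used, not something this diff shows):

```go
package main

import (
	"fmt"

	"github.com/tektoncd/triggers/pkg/apis/triggers"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// SchemeGroupVersion in the renamed register.go is now derived from triggers.GroupName.
	gv := schema.GroupVersion{Group: triggers.GroupName, Version: "v1beta1"}
	fmt.Println(gv.WithKind("EventListener")) // triggers.tekton.dev/v1beta1, Kind=EventListener

	// The label keys are bare "/suffix" strings so they can be prefixed with the group name.
	fmt.Println(triggers.GroupName + triggers.EventListenerLabelKey) // triggers.tekton.dev/eventlistener
}
```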
*/ -package v1alpha1 +package v1beta1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_types.go similarity index 97% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_types.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_types.go index 4614401249..64623ad4c1 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_validation.go similarity index 97% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_validation.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_validation.go index 5f236ac1b3..de0b356d55 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_validation.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_defaults.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_defaults.go similarity index 71% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_defaults.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_defaults.go index 8abb60ca9c..cc1fb85eba 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_defaults.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,30 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "context" - "knative.dev/pkg/logging" + "github.com/tektoncd/triggers/pkg/apis/triggers/contexts" ) type triggerSpecBindingArray []*TriggerSpecBinding // SetDefaults sets the defaults on the object. 
func (t *Trigger) SetDefaults(ctx context.Context) { - if !IsUpgradeViaDefaulting(ctx) { + if !contexts.IsUpgradeViaDefaulting(ctx) { return } triggerSpecBindingArray(t.Spec.Bindings).defaultBindings() for _, ti := range t.Spec.Interceptors { ti.defaultInterceptorKind() - if err := ti.updateCoreInterceptors(); err != nil { - // The err only happens due to malformed JSON and should never really happen - // We can't return an error here, so print out the error - logger := logging.FromContext(ctx) - logger.Errorf("failed to setDefaults for trigger: %s; err: %s", t.Name, err) - } } } diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_defaults.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_defaults.go similarity index 93% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_defaults.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_defaults.go index 2e705888fc..de7097e8a2 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_defaults.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_types.go similarity index 76% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_types.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_types.go index d3401fa91f..fa69b51d52 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1beta1 import ( - pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "knative.dev/pkg/apis" ) @@ -30,19 +26,6 @@ import ( var _ apis.Validatable = (*TriggerTemplate)(nil) var _ apis.Defaultable = (*TriggerTemplate)(nil) -var Decoder runtime.Decoder - -func init() { - scheme := runtime.NewScheme() - utilruntime.Must(pipelinev1alpha1.AddToScheme(scheme)) - utilruntime.Must(pipelinev1beta1.AddToScheme(scheme)) - codec := serializer.NewCodecFactory(scheme) - Decoder = codec.UniversalDecoder( - pipelinev1alpha1.SchemeGroupVersion, - pipelinev1beta1.SchemeGroupVersion, - ) -} - // TriggerTemplateSpec holds the desired state of TriggerTemplate type TriggerTemplateSpec struct { Params []ParamSpec `json:"params,omitempty"` diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_validation.go similarity index 98% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_validation.go index 2f23cb5e0e..79b432c119 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "context" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types.go similarity index 71% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types.go index 94513927a7..04018eafb5 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1beta1 import ( - "encoding/json" - "fmt" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -96,12 +93,6 @@ type TriggerInterceptor struct { // WebhookInterceptor refers to an old style webhook interceptor service Webhook *WebhookInterceptor `json:"webhook,omitempty"` - - // Deprecated old fields below - DeprecatedGitHub *GitHubInterceptor `json:"github,omitempty"` - DeprecatedGitLab *GitLabInterceptor `json:"gitlab,omitempty"` - DeprecatedCEL *CELInterceptor `json:"cel,omitempty"` - DeprecatedBitbucket *BitbucketInterceptor `json:"bitbucket,omitempty"` } // InterceptorParams defines a key-value pair that can be passed on an interceptor @@ -138,94 +129,12 @@ func (ti *TriggerInterceptor) defaultInterceptorKind() { } } -func (ti *TriggerInterceptor) updateCoreInterceptors() error { - if ti == nil { - return nil - } - if ti.Ref.Name != "" { - return nil - } - ti.Ref.Name = ti.GetName() - ti.Params = []InterceptorParams{} - switch ti.Ref.Name { - case "bitbucket": - if err := addToParams(&ti.Params, "secretRef", ti.DeprecatedBitbucket.SecretRef); err != nil { - return err - } - if err := addToParams(&ti.Params, "eventTypes", ti.DeprecatedBitbucket.EventTypes); err != nil { - return err - } - ti.DeprecatedBitbucket = nil - case "gitlab": - if err := addToParams(&ti.Params, "secretRef", ti.DeprecatedGitLab.SecretRef); err != nil { - return err - } - if err := addToParams(&ti.Params, "eventTypes", ti.DeprecatedGitLab.EventTypes); err != nil { - return err - } - ti.DeprecatedGitLab = nil - case "github": - if err := addToParams(&ti.Params, "secretRef", ti.DeprecatedGitHub.SecretRef); err != nil { - return err - } - if err := addToParams(&ti.Params, "eventTypes", ti.DeprecatedGitHub.EventTypes); err != nil { - return err - } - ti.DeprecatedGitHub = nil - case "cel": - if err := addToParams(&ti.Params, "filter", ti.DeprecatedCEL.Filter); err != nil { - return err - } - if err := addToParams(&ti.Params, "overlays", ti.DeprecatedCEL.Overlays); err != nil { - return err - } - ti.DeprecatedCEL = nil - } - return nil -} - -func addToParams(params *[]InterceptorParams, name string, val interface{}) error { - if val == nil { - return nil - } - v, err := toV1JSON(val) - if err != nil { - return err - } - *params = append(*params, InterceptorParams{ - Name: name, - Value: v, - }) - return nil -} - -func toV1JSON(v interface{}) (apiextensionsv1.JSON, error) { - b, err := json.Marshal(v) - if err != nil { - return apiextensionsv1.JSON{}, fmt.Errorf("json.Marshal() failed: %s", err) - } - return apiextensionsv1.JSON{ - Raw: b, - }, nil -} - // GetName returns the name for the given interceptor func (ti *TriggerInterceptor) GetName() string { - // This is temporary until we implement #869 - name := "" - switch { - case ti.Ref.Name != "": - name = ti.Ref.Name - case ti.DeprecatedBitbucket != nil: - name = "bitbucket" - case ti.DeprecatedCEL != nil: - name = "cel" - case ti.DeprecatedGitHub != nil: - name = "github" - case ti.DeprecatedGitLab != nil: - name = "gitlab" + if ti.Ref.Name != "" { + return ti.Ref.Name } - return name + return "" } // WebhookInterceptor provides a webhook to intercept and pre-process events @@ -266,7 +175,7 @@ type CELInterceptor struct { Overlays []CELOverlay `json:"overlays,omitempty"` } -// CELOverlay provides a way to modify the request body using DeprecatedCEL expressions +// CELOverlay provides a way to modify the request body using CEL 
expressions type CELOverlay struct { Key string `json:"key,omitempty"` Expression string `json:"expression,omitempty"` diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types_convert.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types_convert.go similarity index 96% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types_convert.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types_convert.go index c59c2daabe..59082a4e7b 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types_convert.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types_convert.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( "encoding/json" diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_validation.go similarity index 74% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_validation.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_validation.go index e892d41ada..898dcd8b48 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_validation.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Tekton Authors +Copyright 2021 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
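With the deprecated github/gitlab/cel/bitbucket fields gone from TriggerInterceptor above, an interceptor is now expressed purely as a named ref plus free-form params. A hedged sketch of the v1beta1 equivalent of the removed DeprecatedCEL field, assuming the Ref field's type is the InterceptorRef used elsewhere in the package and that param values are apiextensionsv1.JSON as in the deleted conversion helpers:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
	// Reference the "cel" cluster interceptor by name and pass its filter as a JSON param,
	// instead of setting the removed DeprecatedCEL field.
	interceptor := v1beta1.TriggerInterceptor{
		Ref: v1beta1.InterceptorRef{Name: "cel"}, // type name assumed from ti.Ref.Name above
		Params: []v1beta1.InterceptorParams{{
			Name:  "filter",
			Value: apiextensionsv1.JSON{Raw: []byte(`"body.action in ['opened', 'synchronize']"`)},
		}},
	}
	fmt.Println(interceptor.GetName()) // cel
}
```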
*/ -package v1alpha1 +package v1beta1 import ( "context" "fmt" "net/http" - "github.com/google/cel-go/cel" pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/validate" "knative.dev/pkg/apis" @@ -53,7 +52,7 @@ func (t *TriggerSpec) validate(ctx context.Context) *apis.FieldError { func (t TriggerSpecTemplate) validate(ctx context.Context) (errs *apis.FieldError) { // Optional explicit match if t.APIVersion != "" { - if t.APIVersion != "v1alpha1" { + if t.APIVersion != "v1alpha1" && t.APIVersion != "v1beta1" { errs = errs.Also(apis.ErrInvalidValue(fmt.Errorf("invalid apiVersion"), "template.apiVersion")) } } @@ -93,32 +92,13 @@ func (t triggerSpecBindingArray) validate(ctx context.Context) (errs *apis.Field } func (i *TriggerInterceptor) validate(ctx context.Context) (errs *apis.FieldError) { - if i.Webhook == nil && i.DeprecatedGitHub == nil && i.DeprecatedGitLab == nil && i.DeprecatedCEL == nil && i.DeprecatedBitbucket == nil { + if i.Webhook == nil { if i.Ref.Name == "" { // Check to see if Interceptor referenced using Ref errs = errs.Also(apis.ErrMissingField("interceptor")) } } - // Enforce oneof - numSet := 0 - if i.Webhook != nil { - numSet++ - } - if i.DeprecatedGitHub != nil { - numSet++ - } - if i.DeprecatedGitLab != nil { - numSet++ - } - if i.DeprecatedBitbucket != nil { - numSet++ - } - - if numSet > 1 { - errs = errs.Also(apis.ErrMultipleOneOf("interceptor.webhook", "interceptor.github", "interceptor.gitlab")) - } - - if i.Webhook != nil { + if i.Webhook != nil { // TODO: This should be an error? if i.Webhook.ObjectRef == nil || i.Webhook.ObjectRef.Name == "" { errs = errs.Also(apis.ErrMissingField("interceptor.webhook.objectRef")) } @@ -147,25 +127,5 @@ func (i *TriggerInterceptor) validate(ctx context.Context) (errs *apis.FieldErro } } } - - if i.DeprecatedCEL != nil { - if i.DeprecatedCEL.Filter == "" && len(i.DeprecatedCEL.Overlays) == 0 { - errs = errs.Also(apis.ErrMultipleOneOf("cel.filter", "cel.overlays")) - } - env, err := cel.NewEnv() - if err != nil { - errs = errs.Also(apis.ErrInvalidValue(fmt.Errorf("failed to create a DeprecatedCEL env: %s", err), "cel.filter")) - } - if i.DeprecatedCEL.Filter != "" { - if _, issues := env.Parse(i.DeprecatedCEL.Filter); issues != nil && issues.Err() != nil { - errs = errs.Also(apis.ErrInvalidValue(fmt.Errorf("failed to parse the DeprecatedCEL filter: %s", issues.Err()), "cel.filter")) - } - } - for _, v := range i.DeprecatedCEL.Overlays { - if _, issues := env.Parse(v.Expression); issues != nil && issues.Err() != nil { - errs = errs.Also(apis.ErrInvalidValue(fmt.Errorf("failed to parse the DeprecatedCEL overlay: %s", issues.Err()), "cel.overlay")) - } - } - } return errs } diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/zz_generated.deepcopy.go similarity index 85% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go rename to vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/zz_generated.deepcopy.go index d0a37947a2..3e3d9de680 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/zz_generated.deepcopy.go @@ -18,10 +18,10 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. 
-package v1alpha1 +package v1beta1 import ( - v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -91,128 +91,6 @@ func (in *CELOverlay) DeepCopy() *CELOverlay { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientConfig) DeepCopyInto(out *ClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(apis.URL) - (*in).DeepCopyInto(*out) - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConfig. -func (in *ClientConfig) DeepCopy() *ClientConfig { - if in == nil { - return nil - } - out := new(ClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterInterceptor) DeepCopyInto(out *ClusterInterceptor) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInterceptor. -func (in *ClusterInterceptor) DeepCopy() *ClusterInterceptor { - if in == nil { - return nil - } - out := new(ClusterInterceptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterInterceptor) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterInterceptorList) DeepCopyInto(out *ClusterInterceptorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterInterceptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInterceptorList. -func (in *ClusterInterceptorList) DeepCopy() *ClusterInterceptorList { - if in == nil { - return nil - } - out := new(ClusterInterceptorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterInterceptorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterInterceptorSpec) DeepCopyInto(out *ClusterInterceptorSpec) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInterceptorSpec. 
-func (in *ClusterInterceptorSpec) DeepCopy() *ClusterInterceptorSpec { - if in == nil { - return nil - } - out := new(ClusterInterceptorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterInterceptorStatus) DeepCopyInto(out *ClusterInterceptorStatus) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - in.AddressStatus.DeepCopyInto(&out.AddressStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInterceptorStatus. -func (in *ClusterInterceptorStatus) DeepCopy() *ClusterInterceptorStatus { - if in == nil { - return nil - } - out := new(ClusterInterceptorStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterTriggerBinding) DeepCopyInto(out *ClusterTriggerBinding) { *out = *in @@ -378,16 +256,6 @@ func (in *EventListenerSpec) DeepCopyInto(out *EventListenerSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.DeprecatedReplicas != nil { - in, out := &in.DeprecatedReplicas, &out.DeprecatedReplicas - *out = new(int32) - **out = **in - } - if in.DeprecatedPodTemplate != nil { - in, out := &in.DeprecatedPodTemplate, &out.DeprecatedPodTemplate - *out = new(PodTemplate) - (*in).DeepCopyInto(*out) - } in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) if in.LabelSelector != nil { in, out := &in.LabelSelector, &out.LabelSelector @@ -707,27 +575,6 @@ func (in *SecretRef) DeepCopy() *SecretRef { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Status) DeepCopyInto(out *Status) { *out = *in @@ -923,26 +770,6 @@ func (in *TriggerInterceptor) DeepCopyInto(out *TriggerInterceptor) { *out = new(WebhookInterceptor) (*in).DeepCopyInto(*out) } - if in.DeprecatedGitHub != nil { - in, out := &in.DeprecatedGitHub, &out.DeprecatedGitHub - *out = new(GitHubInterceptor) - (*in).DeepCopyInto(*out) - } - if in.DeprecatedGitLab != nil { - in, out := &in.DeprecatedGitLab, &out.DeprecatedGitLab - *out = new(GitLabInterceptor) - (*in).DeepCopyInto(*out) - } - if in.DeprecatedCEL != nil { - in, out := &in.DeprecatedCEL, &out.DeprecatedCEL - *out = new(CELInterceptor) - (*in).DeepCopyInto(*out) - } - if in.DeprecatedBitbucket != nil { - in, out := &in.DeprecatedBitbucket, &out.DeprecatedBitbucket - *out = new(BitbucketInterceptor) - (*in).DeepCopyInto(*out) - } return } @@ -1214,7 +1041,7 @@ func (in *WebhookInterceptor) DeepCopyInto(out *WebhookInterceptor) { } if in.Header != nil { in, out := &in.Header, &out.Header - *out = make([]v1beta1.Param, len(*in)) + *out = make([]pipelinev1beta1.Param, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/go.opencensus.io/resource/resourcekeys/const.go b/vendor/go.opencensus.io/resource/resourcekeys/const.go deleted file mode 100644 index 1f2246662f..0000000000 --- a/vendor/go.opencensus.io/resource/resourcekeys/const.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package resourcekeys contains well known type and label keys for resources. -package resourcekeys // import "go.opencensus.io/resource/resourcekeys" - -// Constants for Kubernetes resources. -const ( - K8SType = "k8s" - - // A uniquely identifying name for the Kubernetes cluster. Kubernetes - // does not have cluster names as an internal concept so this may be - // set to any meaningful value within the environment. For example, - // GKE clusters have a name which can be used for this label. - K8SKeyClusterName = "k8s.cluster.name" - K8SKeyNamespaceName = "k8s.namespace.name" - K8SKeyPodName = "k8s.pod.name" - K8SKeyDeploymentName = "k8s.deployment.name" -) - -// Constants for Container resources. -const ( - ContainerType = "container" - - // A uniquely identifying name for the Container. - ContainerKeyName = "container.name" - ContainerKeyImageName = "container.image.name" - ContainerKeyImageTag = "container.image.tag" -) - -// Constants for Cloud resources. -const ( - CloudType = "cloud" - - CloudKeyProvider = "cloud.provider" - CloudKeyAccountID = "cloud.account.id" - CloudKeyRegion = "cloud.region" - CloudKeyZone = "cloud.zone" - - // Cloud Providers - CloudProviderAWS = "aws" - CloudProviderGCP = "gcp" - CloudProviderAZURE = "azure" -) - -// Constants for Host resources. -const ( - HostType = "host" - - // A uniquely identifying name for the host. - HostKeyName = "host.name" - - // A hostname as returned by the 'hostname' command on host machine. 
- HostKeyHostName = "host.hostname" - HostKeyID = "host.id" - HostKeyType = "host.type" -) diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore index c3fa253893..2e337a0ed5 100644 --- a/vendor/go.uber.org/atomic/.gitignore +++ b/vendor/go.uber.org/atomic/.gitignore @@ -10,3 +10,6 @@ lint.log # Profiling output *.prof + +# Output of fossa analyzer +/fossa diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 13d0a4f254..0000000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -env: - global: - - GO111MODULE=on - -matrix: - include: - - go: oldstable - - go: stable - env: LINT=1 - -cache: - directories: - - vendor - -before_install: - - go version - -script: - - test -z "$LINT" || make lint - - make cover - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index 24c0274dc3..3c3f2380cd 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. +- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + ## [1.7.0] - 2020-09-14 ### Added - Support JSON serialization and deserialization of primitive atomic types. @@ -63,6 +68,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Initial release. +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 [1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 [1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 [1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 1b1376d425..46c945b32b 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -69,6 +69,7 @@ generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) generatenodirty: @[ -z "$$(git status --porcelain)" ] || ( \ echo "Working tree is dirty. Commit your changes first."; \ + git status; \ exit 1 ) @make generate @status=$$(git status --porcelain); \ diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index ade0c20f16..96b47a1f12 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt). 
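A minimal sketch of the two wrappers added in go.uber.org/atomic v1.8.0 and listed in the CHANGELOG hunk above. The variable names and values here are illustrative only and are not taken from this diff; the method set (Load, Store, Add, CAS, Swap) is the one shown in the new uintptr.go and unsafe_pointer.go files further down.

package main

import (
	"fmt"
	"unsafe"

	"go.uber.org/atomic"
)

func main() {
	// Uintptr wraps sync/atomic's uintptr operations in a small struct,
	// matching the other integer wrappers in the package.
	counter := atomic.NewUintptr(0)
	counter.Store(0x1000)
	counter.Add(16)
	fmt.Printf("%#x\n", counter.Load()) // 0x1010

	// UnsafePointer does the same for unsafe.Pointer values.
	x, y := 1, 2
	p := atomic.NewUnsafePointer(unsafe.Pointer(&x))
	swapped := p.CAS(unsafe.Pointer(&x), unsafe.Pointer(&y))
	fmt.Println(swapped, *(*int)(p.Load())) // true 2
}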
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg [doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic +[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/atomic [reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go index 9cf1914b1f..de5f72ad28 100644 --- a/vendor/go.uber.org/atomic/bool.go +++ b/vendor/go.uber.org/atomic/bool.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 027cfcb20b..2bad9c010f 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index a6166fbea0..fc529756cf 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go index 0719060207..f833fd373f 100644 --- a/vendor/go.uber.org/atomic/float64.go +++ b/vendor/go.uber.org/atomic/float64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go index 50d6b24858..1e9ef4f879 100644 --- a/vendor/go.uber.org/atomic/gen.go +++ b/vendor/go.uber.org/atomic/gen.go @@ -24,3 +24,4 @@ package atomic //go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go //go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go //go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum index 4f76e62c1f..4f89841505 100644 --- a/vendor/go.uber.org/atomic/go.sum +++ b/vendor/go.uber.org/atomic/go.sum @@ -1,4 +1,3 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go index 18ae56493e..a20fc998e1 100644 --- a/vendor/go.uber.org/atomic/int32.go +++ b/vendor/go.uber.org/atomic/int32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go index 2bcbbfaa95..f03ee00905 100644 --- a/vendor/go.uber.org/atomic/int64.go +++ b/vendor/go.uber.org/atomic/int64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index 225b7a2be0..f8a6f339a6 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index a973aba1a6..0b5173230c 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 3b6c71fd5a..71e3f3247f 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go new file mode 100644 index 0000000000..6b363ada35 --- /dev/null +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(i uintptr) *Uintptr { + return &Uintptr{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(n uintptr) uintptr { + return atomic.AddUintptr(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(n uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. +func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uintptr) CAS(old, new uintptr) bool { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(n uintptr) { + atomic.StoreUintptr(&i.v, n) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. 
+func (i *Uintptr) Swap(n uintptr) uintptr { + return atomic.SwapUintptr(&i.v, n) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. +func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go new file mode 100644 index 0000000000..a3830c665d --- /dev/null +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(p unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: p} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(q unsafe.Pointer) { + atomic.StorePointer(&p.v, q) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(q unsafe.Pointer) unsafe.Pointer { + return atomic.SwapPointer(&p.v, q) +} + +// CAS is an atomic compare-and-swap. 
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) bool { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml index 786c917a39..8636ab42ad 100644 --- a/vendor/go.uber.org/multierr/.travis.yml +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -4,17 +4,11 @@ go_import_path: go.uber.org/multierr env: global: - - GO15VENDOREXPERIMENT=1 - GO111MODULE=on go: - - 1.11.x - - 1.12.x - - 1.13.x - -cache: - directories: - - vendor + - oldstable + - stable before_install: - go version diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index 3110c5af0b..6f1db9ef4a 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,12 @@ Releases ======== +v1.6.0 (2020-09-14) +=================== + +- Actually drop library dependency on development-time tooling. + + v1.5.0 (2020-02-24) =================== diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile index 416018237e..316004400b 100644 --- a/vendor/go.uber.org/multierr/Makefile +++ b/vendor/go.uber.org/multierr/Makefile @@ -21,12 +21,12 @@ gofmt: .PHONY: golint golint: - @go install golang.org/x/lint/golint + @cd tools && go install golang.org/x/lint/golint @$(GOBIN)/golint ./... .PHONY: staticcheck staticcheck: - @go install honnef.co/go/tools/cmd/staticcheck + @cd tools && go install honnef.co/go/tools/cmd/staticcheck @$(GOBIN)/staticcheck ./... .PHONY: lint @@ -38,5 +38,5 @@ cover: go tool cover -html=cover.out -o cover.html update-license: - @go install go.uber.org/tools/update-license + @cd tools && go install go.uber.org/tools/update-license @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index 04eb9618c1..5c9b67d537 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -54,7 +54,7 @@ // // errors := multierr.Errors(err) // if len(errors) > 0 { -// fmt.Println("The following errors occurred:") +// fmt.Println("The following errors occurred:", errors) // } // // Advanced Usage diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod index 58d5f90bbd..ff8bdf95fc 100644 --- a/vendor/go.uber.org/multierr/go.mod +++ b/vendor/go.uber.org/multierr/go.mod @@ -4,9 +4,5 @@ go 1.12 require ( github.com/stretchr/testify v1.3.0 - go.uber.org/atomic v1.6.0 - go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee - golang.org/x/lint v0.0.0-20190930215403-16217165b5de - golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 // indirect - honnef.co/go/tools v0.0.1-2019.2.3 + go.uber.org/atomic v1.7.0 ) diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum index 557fbba28f..ecfc286578 100644 --- a/vendor/go.uber.org/multierr/go.sum +++ b/vendor/go.uber.org/multierr/go.sum @@ -1,45 +1,11 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 
v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml deleted file mode 100644 index cfdc69f413..0000000000 --- a/vendor/go.uber.org/zap/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -sudo: false - -go_import_path: go.uber.org/zap -env: - global: - - TEST_TIMEOUT_SCALE=10 - - GO111MODULE=on - -matrix: - include: - - go: 1.13.x - - go: 1.14.x - env: LINT=1 - -script: - - test -z "$LINT" || make lint - - make test - - make bench - -after_success: - - make cover - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index fa817e6a10..6c32101216 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,56 @@ # Changelog +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. + ## 1.16.0 (1 Sep 2020) Bugfixes: @@ -430,3 +481,12 @@ upgrade to the upcoming stable release. 
[#854]: https://github.com/uber-go/zap/pull/854 [#861]: https://github.com/uber-go/zap/pull/861 [#862]: https://github.com/uber-go/zap/pull/862 +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md index 9454bbaf02..5cd9656871 100644 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -25,12 +25,6 @@ git remote add upstream https://github.com/uber-go/zap.git git fetch upstream ``` -Install zap's dependencies: - -``` -make dependencies -``` - Make sure that the tests and the linters pass: ``` diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md index 5ec7288750..b183b20bc1 100644 --- a/vendor/go.uber.org/zap/FAQ.md +++ b/vendor/go.uber.org/zap/FAQ.md @@ -27,6 +27,13 @@ abstraction, and it lets us add methods without introducing breaking changes. Your applications should define and depend upon an interface that includes just the methods you use. +### Why are some of my logs missing? + +Logs are dropped intentionally by zap when sampling is enabled. The production +configuration (as returned by `NewProductionConfig()` enables sampling which will +cause repeated logs within a second to be sampled. See more details on why sampling +is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). + ### Why sample application logs? Applications often experience runs of errors, either because of a bug or @@ -150,6 +157,7 @@ We're aware of the following extensions, but haven't used them ourselves: | `github.com/fgrosse/zaptest` | Ginkgo | | `github.com/blendle/zapdriver` | Stackdriver | | `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | [go-proverbs]: https://go-proverbs.github.io/ [import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index dfaf6406e9..9b1bc3b0e1 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -7,7 +7,7 @@ BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem # Directories containing independent Go modules. # # We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks +MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test # Many Go tools take file globs or directories as arguments instead of packages. GO_FILES := $(shell \ @@ -33,12 +33,18 @@ lint: $(GOLINT) $(STATICCHECK) @echo "Checking for license headers..." @./checklicense.sh | tee -a lint.log @[ ! -s lint.log ] + @echo "Checking 'go mod tidy'..." + @make tidy + @if ! 
git diff --quiet; then \ + echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ + git --no-pager diff; \ + fi $(GOLINT): - go install golang.org/x/lint/golint + cd tools && go install golang.org/x/lint/golint $(STATICCHECK): - go install honnef.co/go/tools/cmd/staticcheck + cd tools && go install honnef.co/go/tools/cmd/staticcheck .PHONY: test test: @@ -61,3 +67,7 @@ bench: updatereadme: rm -f README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index bcea28a196..1e64d6cffc 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -123,10 +123,10 @@ Released under the [MIT License](LICENSE.txt). benchmarking against slightly older versions of other packages. Versions are pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.com/uber-go/zap +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 3f4b86e081..9e929cd98e 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -106,6 +106,24 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + // TrimNewline trims any final "\n" byte from the end of the buffer. func (b *Buffer) TrimNewline() { if i := len(b.bs) - 1; i >= 0 { diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 3c0d7d9578..bbb745db5b 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -400,6 +400,16 @@ func Object(key string, val zapcore.ObjectMarshaler) Field { return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} } +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + // Any takes a key and an arbitrary value and chooses the best way to represent // them as a field, falling back to a reflection-based approach only if // necessary. 
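The field.go hunk above adds zap.Inline, which writes an ObjectMarshaler's fields into the current namespace instead of nesting them under a key the way zap.Object does. A small sketch under that assumption follows; the requestInfo type is hypothetical and used only for illustration.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// requestInfo is a hypothetical type that knows how to encode itself
// without reflection by implementing zapcore.ObjectMarshaler.
type requestInfo struct {
	Method string
	Path   string
}

func (r requestInfo) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("method", r.Method)
	enc.AddString("path", r.Path)
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	req := requestInfo{Method: "GET", Path: "/healthz"}

	// zap.Object nests the fields under the "req" key, roughly
	// {"req":{"method":"GET","path":"/healthz"}} in the JSON output.
	logger.Info("object field", zap.Object("req", req))

	// zap.Inline (wired up through the new InlineMarshalerType later in
	// this diff) adds the same fields at the top level instead, roughly
	// {"method":"GET","path":"/healthz"}.
	logger.Info("inline field", zap.Inline(req))
}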
diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod index 6ef4db70ed..9455c99cc9 100644 --- a/vendor/go.uber.org/zap/go.mod +++ b/vendor/go.uber.org/zap/go.mod @@ -3,11 +3,12 @@ module go.uber.org/zap go 1.13 require ( + github.com/benbjohnson/clock v1.1.0 github.com/pkg/errors v0.8.1 - github.com/stretchr/testify v1.4.0 - go.uber.org/atomic v1.6.0 - go.uber.org/multierr v1.5.0 - golang.org/x/lint v0.0.0-20190930215403-16217165b5de - gopkg.in/yaml.v2 v2.2.2 - honnef.co/go/tools v0.0.1-2019.2.3 + github.com/stretchr/testify v1.7.0 + go.uber.org/atomic v1.7.0 + go.uber.org/goleak v1.1.10 + go.uber.org/multierr v1.6.0 + gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum index 99cdb93ea0..b330071a06 100644 --- a/vendor/go.uber.org/zap/go.sum +++ b/vendor/go.uber.org/zap/go.sum @@ -1,13 +1,11 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -15,42 +13,42 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee 
h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 1b0ecaca9c..1297c33b32 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -23,6 +23,7 @@ package zap import ( "encoding/json" "fmt" + "io" "net/http" "go.uber.org/zap/zapcore" @@ -31,47 +32,63 @@ import ( // ServeHTTP is a simple JSON endpoint that can report on or change the current // logging level. // -// GET requests return a JSON description of the current logging level. PUT -// requests change the logging level and expect a payload like: +// GET +// +// The GET request returns a JSON description of the current logging level like: // {"level":"info"} // -// It's perfectly safe to change the logging level while a program is running. +// PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. 
+// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +// func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { type errorResponse struct { Error string `json:"error"` } type payload struct { - Level *zapcore.Level `json:"level"` + Level zapcore.Level `json:"level"` } enc := json.NewEncoder(w) switch r.Method { - case http.MethodGet: - current := lvl.Level() - enc.Encode(payload{Level: ¤t}) - + enc.Encode(payload{Level: lvl.Level()}) case http.MethodPut: - var req payload - - if errmess := func() string { - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return fmt.Sprintf("Request body must be well-formed JSON: %v", err) - } - if req.Level == nil { - return "Must specify a logging level." - } - return "" - }(); errmess != "" { + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: errmess}) + enc.Encode(errorResponse{Error: err.Error()}) return } - - lvl.SetLevel(*req.Level) - enc.Encode(req) - + lvl.SetLevel(requestedLvl) + enc.Encode(payload{Level: lvl.Level()}) default: w.WriteHeader(http.StatusMethodNotAllowed) enc.Encode(errorResponse{ @@ -79,3 +96,37 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { }) } } + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, fmt.Errorf("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, fmt.Errorf("must specify logging level") + } + return *pld.Level, nil + +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index ea484aed10..f116bd936f 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -26,7 +26,6 @@ import ( "os" "runtime" "strings" - "time" "go.uber.org/zap/zapcore" ) @@ -42,14 +41,17 @@ type Logger struct { core zapcore.Core development bool + addCaller bool + onFatal zapcore.CheckWriteAction // default is WriteThenFatal + name string errorOutput zapcore.WriteSyncer - addCaller bool - addStack zapcore.LevelEnabler + addStack zapcore.LevelEnabler callerSkip int - onFatal zapcore.CheckWriteAction // default is WriteThenFatal + + clock zapcore.Clock } // New constructs a new Logger from the provided zapcore.Core and Options. If @@ -70,6 +72,7 @@ func New(core zapcore.Core, options ...Option) *Logger { core: core, errorOutput: zapcore.Lock(os.Stderr), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } return log.WithOptions(options...) 
} @@ -84,6 +87,7 @@ func NewNop() *Logger { core: zapcore.NewNopCore(), errorOutput: zapcore.AddSync(ioutil.Discard), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } } @@ -269,7 +273,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // log message will actually be written somewhere. ent := zapcore.Entry{ LoggerName: log.name, - Time: time.Now(), + Time: log.clock.Now(), Level: lvl, Message: msg, } @@ -306,7 +310,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if log.addCaller { frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) if !defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } @@ -334,7 +338,7 @@ func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { const skipOffset = 2 // skip getCallerFrame and Callers pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc[:]) + numFrames := runtime.Callers(skip+skipOffset, pc) if numFrames < 1 { return } diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 0135c20923..e9e66161f5 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -138,3 +138,11 @@ func OnFatal(action zapcore.CheckWriteAction) Option { log.onFatal = action }) } + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 77ca227f47..0b9651981a 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -222,19 +222,30 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf return } - // Format with Sprint, Sprintf, or neither. - msg := template - if msg == "" && len(fmtArgs) > 0 { - msg = fmt.Sprint(fmtArgs...) - } else if msg != "" && len(fmtArgs) > 0 { - msg = fmt.Sprintf(template, fmtArgs...) - } - + msg := getMessage(template, fmtArgs) if ce := s.base.Check(lvl, msg); ce != nil { ce.Write(s.sweetenFields(context)...) } } +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { if len(args) == 0 { return nil @@ -255,7 +266,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // Make sure this element isn't a dangling key. if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) break } @@ -276,7 +287,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // If we encountered any invalid key-value pairs, log an error. 
if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) } return fields } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 0000000000..0c1436f76c --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,188 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. 
+ Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + stopped bool // whether Stop() has run + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. +func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 0000000000..d2ea95b394 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
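The buffered_write_syncer.go file added above describes a WriteSyncer that batches writes in memory and flushes them when a size threshold is hit or a timer fires. A minimal sketch of wiring it into a logger; the buffer size and flush interval are arbitrary values chosen for illustration, not defaults from the diff.

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Buffer writes to stderr, flushing once 64 kB accumulate or every
	// 10 seconds, whichever comes first. Stop flushes whatever is left
	// and shuts down the background flush goroutine.
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stderr),
		Size:          64 * 1024,
		FlushInterval: 10 * time.Second,
	}
	defer ws.Stop()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("buffered hello")
}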
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" +) + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 3b68f8c0c5..2307af404c 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -56,7 +56,7 @@ type consoleEncoder struct { // encoder configuration, it will omit any element whose key is set to the empty // string. func NewConsoleEncoder(cfg EncoderConfig) Encoder { - if len(cfg.ConsoleSeparator) == 0 { + if cfg.ConsoleSeparator == "" { // Use a default delimiter of '\t' for backwards compatibility cfg.ConsoleSeparator = "\t" } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 4aa8b4f90b..2d815feb84 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. 
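The clock.go file added above defines the zapcore.Clock interface that the new zap.WithClock option (see options.go earlier in this diff) feeds into the logger. A sketch of a hypothetical fixed clock, which can make log timestamps deterministic in tests; the type and times are illustrative only.

package main

import (
	"time"

	"go.uber.org/zap"
)

// fixedClock is a hypothetical zapcore.Clock implementation that always
// reports the same instant.
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time { return c.t }

func (c fixedClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }

func main() {
	frozen := fixedClock{t: time.Date(2021, 6, 28, 0, 0, 0, 0, time.UTC)}

	// zap.WithClock routes every entry's timestamp through the supplied
	// Clock instead of calling time.Now directly.
	logger, err := zap.NewDevelopment(zap.WithClock(frozen))
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("hello") // logged with the frozen 2021-06-28 timestamp
}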
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) ce.ErrorOutput.Sync() } return @@ -221,7 +221,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { } if ce.ErrorOutput != nil { if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) ce.ErrorOutput.Sync() } } diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index 9ba2272c3f..f2a07d7864 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -22,6 +22,7 @@ package zapcore import ( "fmt" + "reflect" "sync" ) @@ -42,7 +43,23 @@ import ( // ... // ], // } -func encodeError(key string, err error, enc ObjectEncoder) error { +func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the Error() method + defer func() { + if rerr := recover(); rerr != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // error that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", rerr) + } + }() + basic := err.Error() enc.AddString(key, basic) diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 7e255d63e0..95bdb0a126 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -92,6 +92,10 @@ const ( ErrorType // SkipType indicates that the field is a no-op. SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. 
+ InlineMarshalerType ) // A Field is a marshaling operation used to add a key-value pair to a logger's @@ -115,6 +119,8 @@ func (f Field) AddTo(enc ObjectEncoder) { err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) case ObjectMarshalerType: err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) case BinaryType: enc.AddBinary(f.Key, f.Interface.([]byte)) case BoolType: @@ -167,7 +173,7 @@ func (f Field) AddTo(enc ObjectEncoder) { case StringerType: err = encodeStringer(f.Key, f.Interface, enc) case ErrorType: - encodeError(f.Key, f.Interface.(error), enc) + err = encodeError(f.Key, f.Interface.(error), enc) case SkipType: break default: diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go index 209e25fe22..d4a1af3d07 100644 --- a/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -91,8 +91,7 @@ func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { if len(ws) == 1 { return ws[0] } - // Copy to protect against https://github.com/golang/go/issues/7809 - return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) + return multiWriteSyncer(ws) } // See https://golang.org/src/io/multi.go diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index c1c5263c42..0e03014837 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -224,12 +224,16 @@ func firstPathOK(r rune) bool { 'a' <= r && r <= 'z' } -// pathOK reports whether r can appear in an import path element. +// modPathOK reports whether r can appear in a module path element. // Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. -// This matches what "go get" has historically recognized in import paths. +// +// This matches what "go get" has historically recognized in import paths, +// and avoids confusing sequences like '%20' or '+' that would change meaning +// if used in a URL. +// // TODO(rsc): We would like to allow Unicode letters, but that requires additional // care in the safe encoding (see "escaped paths" above). -func pathOK(r rune) bool { +func modPathOK(r rune) bool { if r < utf8.RuneSelf { return r == '-' || r == '.' || r == '_' || r == '~' || '0' <= r && r <= '9' || @@ -239,6 +243,17 @@ func pathOK(r rune) bool { return false } +// modPathOK reports whether r can appear in a package import path element. +// +// Import paths are intermediate between module paths and file paths: we allow +// disallow characters that would be confusing or ambiguous as arguments to +// 'go get' (such as '@' and ' ' ), but allow certain characters that are +// otherwise-unambiguous on the command line and historically used for some +// binary names (such as '++' as a suffix for compiler binaries and wrappers). +func importPathOK(r rune) bool { + return modPathOK(r) || r == '+' +} + // fileNameOK reports whether r can appear in a file name. // For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. // If we expand the set of allowed characters here, we have to @@ -270,7 +285,7 @@ func fileNameOK(r rune) bool { // CheckPath checks that a module path is valid. // A valid module path is a valid import path, as checked by CheckImportPath, -// with two additional constraints. +// with three additional constraints. 
// First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, // ASCII digits, dots (U+002E), and dashes (U+002D); @@ -280,8 +295,9 @@ func fileNameOK(r rune) bool { // and must not contain any dots. For paths beginning with "gopkg.in/", // this second requirement is replaced by a requirement that the path // follow the gopkg.in server's conventions. +// Third, no path element may begin with a dot. func CheckPath(path string) error { - if err := checkPath(path, false); err != nil { + if err := checkPath(path, modulePath); err != nil { return fmt.Errorf("malformed module path %q: %v", path, err) } i := strings.Index(path, "/") @@ -315,7 +331,7 @@ func CheckPath(path string) error { // // A valid path element is a non-empty string made up of // ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. -// It must not begin or end with a dot (U+002E), nor contain two dots in a row. +// It must not end with a dot (U+002E), nor contain two dots in a row. // // The element prefix up to the first dot must not be a reserved file name // on Windows, regardless of case (CON, com1, NuL, and so on). The element @@ -326,19 +342,29 @@ func CheckPath(path string) error { // top-level package documentation for additional information about // subtleties of Unicode. func CheckImportPath(path string) error { - if err := checkPath(path, false); err != nil { + if err := checkPath(path, importPath); err != nil { return fmt.Errorf("malformed import path %q: %v", path, err) } return nil } +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. +type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + // checkPath checks that a general path is valid. // It returns an error describing why but not mentioning path. // Because these checks apply to both module paths and import paths, // the caller is expected to add the "malformed ___ path %q: " prefix. // fileName indicates whether the final element of the path is a file name // (as opposed to a directory name). -func checkPath(path string, fileName bool) error { +func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") } @@ -357,39 +383,45 @@ func checkPath(path string, fileName bool) error { elemStart := 0 for i, r := range path { if r == '/' { - if err := checkElem(path[elemStart:i], fileName); err != nil { + if err := checkElem(path[elemStart:i], kind); err != nil { return err } elemStart = i + 1 } } - if err := checkElem(path[elemStart:], fileName); err != nil { + if err := checkElem(path[elemStart:], kind); err != nil { return err } return nil } // checkElem checks whether an individual path element is valid. -// fileName indicates whether the element is a file name (not a directory name). -func checkElem(elem string, fileName bool) error { +func checkElem(elem string, kind pathKind) error { if elem == "" { return fmt.Errorf("empty path element") } if strings.Count(elem, ".") == len(elem) { return fmt.Errorf("invalid path element %q", elem) } - if elem[0] == '.' && !fileName { + if elem[0] == '.' && kind == modulePath { return fmt.Errorf("leading dot in path element") } if elem[len(elem)-1] == '.' 
{ return fmt.Errorf("trailing dot in path element") } - charOK := pathOK - if fileName { - charOK = fileNameOK - } for _, r := range elem { - if !charOK(r) { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { return fmt.Errorf("invalid char %q", r) } } @@ -406,7 +438,7 @@ func checkElem(elem string, fileName bool) error { } } - if fileName { + if kind == filePath { // don't check for Windows short-names in file names. They're // only an issue for import paths. return nil @@ -444,7 +476,7 @@ func checkElem(elem string, fileName bool) error { // top-level package documentation for additional information about // subtleties of Unicode. func CheckFilePath(path string) error { - if err := checkPath(path, true); err != nil { + if err := checkPath(path, filePath); err != nil { return fmt.Errorf("malformed file path %q: %v", path, err) } return nil @@ -647,7 +679,7 @@ func EscapePath(path string) (escaped string, err error) { // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. func EscapeVersion(v string) (escaped string, err error) { - if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { return "", &InvalidVersionError{ Version: v, Err: fmt.Errorf("disallowed version string"), @@ -706,7 +738,7 @@ func UnescapeVersion(escaped string) (v string, err error) { if !ok { return "", fmt.Errorf("invalid escaped version %q", escaped) } - if err := checkElem(v, true); err != nil { + if err := checkElem(v, filePath); err != nil { return "", fmt.Errorf("invalid escaped version %q: %v", v, err) } return v, nil diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index d20f52b7de..344bd14334 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.7 // +build go1.7 package context diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go index d88bd1db12..64d31ecc3e 100644 --- a/vendor/golang.org/x/net/context/go19.go +++ b/vendor/golang.org/x/net/context/go19.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.9 // +build go1.9 package context diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 0f35592df5..5270db5db7 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.7 // +build !go1.7 package context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go index b105f80be4..1f9715341f 100644 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !go1.9 // +build !go1.9 package context diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index e7de24ee64..c79aa73f28 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -137,11 +137,13 @@ func trimOWS(x string) string { // contains token amongst its comma-separated tokens, ASCII // case-insensitively. func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') { + if tokenEqual(trimOWS(v[:comma]), token) { + return true + } + v = v[comma+1:] } - return tokenEqual(v, token) + return tokenEqual(trimOWS(v), token) } // lowerASCII returns the ASCII lowercase version of b. diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile index 53fc525797..8512245952 100644 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -38,7 +38,7 @@ RUN make RUN make install WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN wget https://curl.se/download/curl-7.45.0.tar.gz RUN tar -zxvf curl-7.45.0.tar.gz WORKDIR /root/curl-7.45.0 RUN ./configure --with-ssl --with-nghttp2=/usr/local diff --git a/vendor/golang.org/x/net/http2/ascii.go b/vendor/golang.org/x/net/http2/ascii.go new file mode 100644 index 0000000000..17caa20586 --- /dev/null +++ b/vendor/golang.org/x/net/http2/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. 
+func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 3a67636fe2..652bc11a02 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -7,7 +7,9 @@ package http2 import ( + "context" "crypto/tls" + "errors" "net/http" "sync" ) @@ -78,61 +80,69 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis // It gets its own connection. traceGetConn(req, addr) const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) + cc, err := p.t.dialClientConn(req.Context(), addr, singleUse) if err != nil { return nil, err } return cc, nil } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if st := cc.idleState(); st.canTakeNewRequest { - if p.shouldTraceGetConn(st) { - traceGetConn(req, addr) + for { + p.mu.Lock() + for _, cc := range p.conns[addr] { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } + p.mu.Unlock() + return cc, nil } + } + if !dialOnMiss { p.mu.Unlock() - return cc, nil + return nil, ErrNoCachedConn } - } - if !dialOnMiss { + traceGetConn(req, addr) + call := p.getStartDialLocked(req.Context(), addr) p.mu.Unlock() - return nil, ErrNoCachedConn + <-call.done + if shouldRetryDial(call, req) { + continue + } + return call.res, call.err } - traceGetConn(req, addr) - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err } // dialCall is an in-flight Transport dial call to a host. type dialCall struct { - _ incomparable - p *clientConnPool + _ incomparable + p *clientConnPool + // the context associated with the request + // that created this dialCall + ctx context.Context done chan struct{} // closed when done res *ClientConn // valid after done is closed err error // valid after done is closed } // requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { +func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall { if call, ok := p.dialing[addr]; ok { // A dial is already in-flight. Don't start another. return call } - call := &dialCall{p: p, done: make(chan struct{})} + call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx} if p.dialing == nil { p.dialing = make(map[string]*dialCall) } p.dialing[addr] = call - go call.dial(addr) + go call.dial(call.ctx, addr) return call } // run in its own goroutine. -func (c *dialCall) dial(addr string) { +func (c *dialCall) dial(ctx context.Context, addr string) { const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) close(c.done) c.p.mu.Lock() @@ -276,3 +286,28 @@ type noDialClientConnPool struct{ *clientConnPool } func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { return p.getClientConn(req, addr, noDialOnMiss) } + +// shouldRetryDial reports whether the current request should +// retry dialing after the call finished unsuccessfully, for example +// if the dial was canceled because of a context cancellation or +// deadline expiry. 
+func shouldRetryDial(call *dialCall, req *http.Request) bool { + if call.err == nil { + // No error, no need to retry + return false + } + if call.ctx == req.Context() { + // If the call has the same context as the request, the dial + // should not be retried, since any cancellation will have come + // from this request. + return false + } + if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) { + // If the call error is not because of a context cancellation or a deadline expiry, + // the dial should not be retried. + return false + } + // Only retry if the error is a context cancellation error or deadline expiry + // and the context associated with the call was canceled or expired. + return call.ctx.Err() != nil +} diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go index 3a131016b2..5bf62b032e 100644 --- a/vendor/golang.org/x/net/http2/go111.go +++ b/vendor/golang.org/x/net/http2/go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.11 // +build go1.11 package http2 diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go new file mode 100644 index 0000000000..908af1ab93 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go115.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.15 +// +build go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/http2/h2c/h2c.go b/vendor/golang.org/x/net/http2/h2c/h2c.go index 07c5c9a60e..16319b8fda 100644 --- a/vendor/golang.org/x/net/http2/h2c/h2c.go +++ b/vendor/golang.org/x/net/http2/h2c/h2c.go @@ -84,14 +84,20 @@ func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } defer conn.Close() - s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler}) + s.s.ServeConn(conn, &http2.ServeConnOpts{ + Context: r.Context(), + Handler: s.Handler, + }) return } // Handle Upgrade to h2c (RFC 7540 Section 3.2) if conn, err := h2cUpgrade(w, r); err == nil { defer conn.Close() - s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler}) + s.s.ServeConn(conn, &http2.ServeConnOpts{ + Context: r.Context(), + Handler: s.Handler, + }) return } diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index c3ff3fa1c7..9e12941da4 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -6,7 +6,6 @@ package http2 import ( "net/http" - "strings" "sync" ) @@ -79,10 +78,10 @@ func buildCommonHeaderMaps() { } } -func lowerHeader(v string) string { +func lowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { - return s + return s, true } - return strings.ToLower(v) + return asciiToLower(v) } diff --git a/vendor/golang.org/x/net/http2/not_go111.go 
b/vendor/golang.org/x/net/http2/not_go111.go index 161bca7ce8..cc0baa8197 100644 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.11 // +build !go1.11 package http2 diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go new file mode 100644 index 0000000000..e6c04cf7ac --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go115.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.15 +// +build !go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext opens a TLS connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if cfg.InsecureSkipVerify { + return cn, nil + } + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + return cn, nil +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 2aa859f76f..0ccbe9b4c2 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -231,13 +231,12 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + } else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 { + // If they already provided a TLS 1.0–1.2 CipherSuite list, return an + // error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or + // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { + for _, cs := range s.TLSConfig.CipherSuites { switch cs { case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // Alternative MTI cipher to not discourage ECDSA-only servers. @@ -245,14 +244,9 @@ func ConfigureServer(s *http.Server, conf *Server) error { tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: haveRequired = true } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. 
With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)") } } @@ -265,16 +259,12 @@ func ConfigureServer(s *http.Server, conf *Server) error { s.TLSConfig.PreferServerCipherSuites = true - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { + if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } + if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1") + } if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} @@ -1293,7 +1283,9 @@ func (sc *serverConn) startGracefulShutdown() { sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) } -// After sending GOAWAY, the connection will close after goAwayTimeout. +// After sending GOAWAY with an error code (non-graceful shutdown), the +// connection will close after goAwayTimeout. +// // If we close the connection immediately after sending GOAWAY, there may // be unsent data in our kernel receive buffer, which will cause the kernel // to send a TCP RST on close() instead of a FIN. This RST will abort the @@ -1629,23 +1621,37 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + id := f.Header().StreamID + if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) { + // Discard all DATA frames if the GOAWAY is due to an + // error, or: + // + // Section 6.8: After sending a GOAWAY frame, the sender + // can discard frames for streams initiated by the + // receiver with identifiers higher than the identified + // last stream. return nil } - data := f.Data() - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID + data := f.Data() state, st := sc.state(id) if id == 0 || state == stateIdle { + // Section 6.1: "DATA frames MUST be associated with a + // stream. If a DATA frame is received whose stream + // identifier field is 0x0, the recipient MUST respond + // with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + // // Section 5.1: "Receiving any frame other than HEADERS // or PRIORITY on a stream in this state MUST be // treated as a connection error (Section 5.4.1) of // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." 
if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that @@ -2773,8 +2779,12 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { // but PUSH_PROMISE requests cannot have a body. // http://tools.ietf.org/html/rfc7540#section-8.2 // Also disallow Host, since the promised URL must be absolute. - switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": + if asciiEqualFold(k, "content-length") || + asciiEqualFold(k, "content-encoding") || + asciiEqualFold(k, "trailer") || + asciiEqualFold(k, "te") || + asciiEqualFold(k, "expect") || + asciiEqualFold(k, "host") { return fmt.Errorf("promised request headers cannot include %q", k) } } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 7688d72c39..b97adff7d0 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -264,9 +264,8 @@ type ClientConn struct { peerMaxHeaderListSize uint64 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred @@ -564,12 +563,12 @@ func canRetryError(err error) bool { return false } -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { +func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } @@ -590,34 +589,24 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { return cfg } -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { +func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { if t.DialTLS != nil { return t.DialTLS } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) + if err != nil { return nil, err } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return tlsCn, nil } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil } // disableKeepAlives reports whether connections should be closed as @@ 
-923,46 +912,6 @@ func (cc *ClientConn) closeForLostPing() error { return cc.closeForError(err) } -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") @@ -1005,7 +954,7 @@ func checkConnHeaders(req *http.Request) error { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil @@ -1305,11 +1254,35 @@ var ( errReqBodyTooLong = errors.New("http2: request body larger than specified content length") ) +// frameScratchBufferLen returns the length of a buffer to use for +// outgoing request bodies to read/write to/from. +// +// It returns max(1, min(peer's advertised max frame size, +// Request.ContentLength+1, 512KB)). +func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { + const max = 512 << 10 + n := int64(maxFrameSize) + if n > max { + n = max + } + if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n { + // Add an extra byte past the declared content-length to + // give the caller's Request.Body io.Reader a chance to + // give us more bytes than they declared, so we can catch it + // early. + n = cl + 1 + } + if n < 1 { + return 1 + } + return int(n) // doesn't truncate; max is 512K +} + +var bufPool sync.Pool // of *[]byte + func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { cc := cs.cc sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) defer func() { traceWroteRequest(cs.trace, err) @@ -1328,9 +1301,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( remainLen := actualContentLength(req) hasContentLen := remainLen != -1 + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + // Scratch buffer for reading into & writing from. 
+ scratchLen := cs.frameScratchBufferLen(maxFrameSize) + var buf []byte + if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPool.Put(bp) + buf = *bp + } else { + buf = make([]byte, scratchLen) + defer bufPool.Put(&buf) + } + var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)-1]) + n, err := body.Read(buf[:len(buf)]) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1341,8 +1329,9 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // to send the END_STREAM bit early, double-check that we're actually // at EOF. Subsequent reads should return (0, EOF) at this point. // If either value is different, we return an error in one of two ways below. + var scratch [1]byte var n1 int - n1, err = body.Read(buf[n:]) + n1, err = body.Read(scratch[:]) remainLen -= int64(n1) } if remainLen < 0 { @@ -1412,10 +1401,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } } - cc.mu.Lock() - maxFrameSize := int(cc.maxFrameSize) - cc.mu.Unlock() - cc.wmu.Lock() defer cc.wmu.Unlock() @@ -1531,19 +1516,21 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail var didUA bool for k, vv := range req.Header { - if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { // Host is :authority, already sent. // Content-Length is automatic, set below. continue - } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || - strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || - strings.EqualFold(k, "keep-alive") { + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { // Per 8.1.2.2 Connection-Specific Header // Fields, don't send connection-specific // fields. We have already checked if any // are error-worthy so just ignore the rest. continue - } else if strings.EqualFold(k, "user-agent") { + } else if asciiEqualFold(k, "user-agent") { // Match Go's http1 behavior: at most one // User-Agent. If set to nil or empty string, // then omit it. Otherwise if not mentioned, @@ -1556,7 +1543,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if vv[0] == "" { continue } - } else if strings.EqualFold(k, "cookie") { + } else if asciiEqualFold(k, "cookie") { // Per 8.1.2.5 To allow for better compression efficiency, the // Cookie header field MAY be split into separate header fields, // each with one or more cookie-pairs. @@ -1615,7 +1602,12 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { - name = strings.ToLower(name) + name, ascii := asciiToLower(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } cc.writeHeader(name, value) if traceHeaders { traceWroteHeaderField(trace, name, value) @@ -1663,9 +1655,14 @@ func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { } for k, vv := range req.Trailer { + lowKey, ascii := asciiToLower(k) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). 
+ continue + } // Transfer-Encoding, etc.. have already been filtered at the // start of RoundTrip - lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 3849bc2632..33f61398a1 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -341,7 +341,12 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k = lowerHeader(k) + k, ascii := lowerHeader(k) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + continue + } if !validWireHeaderFieldName(k) { // Skip it as backup paranoia. Per // golang.org/issue/14048, these should diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index a98a31f403..5208ba6cb8 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -4,6 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.10 // +build go1.10 // Package idna implements IDNA2008 using the compatibility processing @@ -66,15 +67,14 @@ func Transitional(transitional bool) Option { // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts // are longer than allowed by the RFC. +// +// This option corresponds to the VerifyDnsLength flag in UTS #46. func VerifyDNSLength(verify bool) Option { return func(o *options) { o.verifyDNSLength = verify } } // RemoveLeadingDots removes leading label separators. Leading runes that map to // dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. -// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. func RemoveLeadingDots(remove bool) Option { return func(o *options) { o.removeLeadingDots = remove } } @@ -82,6 +82,8 @@ func RemoveLeadingDots(remove bool) Option { // ValidateLabels sets whether to check the mandatory label validation criteria // as defined in Section 5.4 of RFC 5891. This includes testing for correct use // of hyphens ('-'), normalization, validity of runes, and the context rules. +// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags +// in UTS #46. func ValidateLabels(enable bool) Option { return func(o *options) { // Don't override existing mappings, but set one that at least checks @@ -90,25 +92,48 @@ func ValidateLabels(enable bool) Option { o.mapping = normalize } o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode + o.checkJoiners = enable + o.checkHyphens = enable + if enable { + o.fromPuny = validateFromPunycode + } else { + o.fromPuny = nil + } + } +} + +// CheckHyphens sets whether to check for correct use of hyphens ('-') in +// labels. Most web browsers do not have this option set, since labels such as +// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use. +// +// This option corresponds to the CheckHyphens flag in UTS #46. +func CheckHyphens(enable bool) Option { + return func(o *options) { o.checkHyphens = enable } +} + +// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix +// A of RFC 5892, concerning the use of joiner runes. +// +// This option corresponds to the CheckJoiners flag in UTS #46. 
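
A short sketch of how the CheckHyphens option introduced above can be combined with the existing lookup options when building an idna Profile; the option mix and the input domain (built from the label quoted in the doc comment) are illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Build a lookup profile that keeps the ContextJ (joiner) and Bidi checks
	// but, like most browsers, tolerates unusual hyphen placement in labels.
	p := idna.New(
		idna.MapForLookup(),
		idna.BidiRule(),
		idna.CheckHyphens(false),
	)
	a, err := p.ToASCII("r3---sn-apo3qvuoxuxbt-j5pe.example")
	fmt.Println(a, err)
}

Note that option order matters: MapForLookup enables label validation (which turns hyphen checking on), and the later CheckHyphens(false) selectively turns it back off.
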
+func CheckJoiners(enable bool) Option { + return func(o *options) { + o.trie = trie + o.checkJoiners = enable } } // StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// hyphen). This is set by default for MapForLookup and ValidateForRegistration, +// but is only useful if ValidateLabels is set. // // This option is useful, for instance, for browsers that allow characters // outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. +// http://www.rfc-editor.org/std/std3.txt for more details. +// +// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46. func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } + return func(o *options) { o.useSTD3Rules = use } } // NOTE: the following options pull in tables. The tables should not be linked @@ -116,6 +141,8 @@ func StrictDomainName(use bool) Option { // BidiRule enables the Bidi rule as defined in RFC 5893. Any application // that relies on proper validation of labels should include this rule. +// +// This option corresponds to the CheckBidi flag in UTS #46. func BidiRule() Option { return func(o *options) { o.bidirule = bidirule.ValidString } } @@ -151,7 +178,8 @@ func MapForLookup() Option { type options struct { transitional bool useSTD3Rules bool - validateLabels bool + checkHyphens bool + checkJoiners bool verifyDNSLength bool removeLeadingDots bool @@ -224,8 +252,11 @@ func (p *Profile) String() string { if p.useSTD3Rules { s += ":UseSTD3Rules" } - if p.validateLabels { - s += ":ValidateLabels" + if p.checkHyphens { + s += ":CheckHyphens" + } + if p.checkJoiners { + s += ":CheckJoiners" } if p.verifyDNSLength { s += ":VerifyDNSLength" @@ -253,26 +284,29 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + transitional: true, + useSTD3Rules: true, + checkHyphens: true, + checkJoiners: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + useSTD3Rules: true, + checkHyphens: true, + checkJoiners: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, - validateLabels: true, verifyDNSLength: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateRegistration, @@ -339,7 +373,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { } isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight labels.set(u) - if err == nil && p.validateLabels { + if err == nil && p.fromPuny != nil { err = p.fromPuny(p, u) } if err == nil { @@ -680,16 +714,18 @@ func (p *Profile) validateLabel(s string) (err error) { } return nil } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is 
set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} + if p.checkHyphens { + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} + if !p.checkJoiners { + return nil } + trie := p.trie // p.checkJoiners is only set if trie is set. // TODO: merge the use of this in the trie. v, sz := trie.lookupString(s) x := info(v) diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index 8842146b5d..55f718f127 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -4,6 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.10 // +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing @@ -65,15 +66,14 @@ func Transitional(transitional bool) Option { // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts // are longer than allowed by the RFC. +// +// This option corresponds to the VerifyDnsLength flag in UTS #46. func VerifyDNSLength(verify bool) Option { return func(o *options) { o.verifyDNSLength = verify } } // RemoveLeadingDots removes leading label separators. Leading runes that map to // dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. -// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. func RemoveLeadingDots(remove bool) Option { return func(o *options) { o.removeLeadingDots = remove } } @@ -81,6 +81,8 @@ func RemoveLeadingDots(remove bool) Option { // ValidateLabels sets whether to check the mandatory label validation criteria // as defined in Section 5.4 of RFC 5891. This includes testing for correct use // of hyphens ('-'), normalization, validity of runes, and the context rules. +// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags +// in UTS #46. func ValidateLabels(enable bool) Option { return func(o *options) { // Don't override existing mappings, but set one that at least checks @@ -89,25 +91,48 @@ func ValidateLabels(enable bool) Option { o.mapping = normalize } o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode + o.checkJoiners = enable + o.checkHyphens = enable + if enable { + o.fromPuny = validateFromPunycode + } else { + o.fromPuny = nil + } + } +} + +// CheckHyphens sets whether to check for correct use of hyphens ('-') in +// labels. Most web browsers do not have this option set, since labels such as +// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use. +// +// This option corresponds to the CheckHyphens flag in UTS #46. +func CheckHyphens(enable bool) Option { + return func(o *options) { o.checkHyphens = enable } +} + +// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix +// A of RFC 5892, concerning the use of joiner runes. +// +// This option corresponds to the CheckJoiners flag in UTS #46. +func CheckJoiners(enable bool) Option { + return func(o *options) { + o.trie = trie + o.checkJoiners = enable } } // StrictDomainName limits the set of permissable ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// hyphen). 
This is set by default for MapForLookup and ValidateForRegistration, +// but is only useful if ValidateLabels is set. // // This option is useful, for instance, for browsers that allow characters // outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. +// http://www.rfc-editor.org/std/std3.txt for more details. +// +// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46. func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } + return func(o *options) { o.useSTD3Rules = use } } // NOTE: the following options pull in tables. The tables should not be linked @@ -115,6 +140,8 @@ func StrictDomainName(use bool) Option { // BidiRule enables the Bidi rule as defined in RFC 5893. Any application // that relies on proper validation of labels should include this rule. +// +// This option corresponds to the CheckBidi flag in UTS #46. func BidiRule() Option { return func(o *options) { o.bidirule = bidirule.ValidString } } @@ -151,7 +178,8 @@ func MapForLookup() Option { type options struct { transitional bool useSTD3Rules bool - validateLabels bool + checkHyphens bool + checkJoiners bool verifyDNSLength bool removeLeadingDots bool @@ -224,8 +252,11 @@ func (p *Profile) String() string { if p.useSTD3Rules { s += ":UseSTD3Rules" } - if p.validateLabels { - s += ":ValidateLabels" + if p.checkHyphens { + s += ":CheckHyphens" + } + if p.checkJoiners { + s += ":CheckJoiners" } if p.verifyDNSLength { s += ":VerifyDNSLength" @@ -254,9 +285,10 @@ var ( punycode = &Profile{} lookup = &Profile{options{ transitional: true, - useSTD3Rules: true, - validateLabels: true, removeLeadingDots: true, + useSTD3Rules: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateAndMap, @@ -264,8 +296,9 @@ var ( }} display = &Profile{options{ useSTD3Rules: true, - validateLabels: true, removeLeadingDots: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateAndMap, @@ -273,8 +306,9 @@ var ( }} registration = &Profile{options{ useSTD3Rules: true, - validateLabels: true, verifyDNSLength: true, + checkHyphens: true, + checkJoiners: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateRegistration, @@ -338,7 +372,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { continue } labels.set(u) - if err == nil && p.validateLabels { + if err == nil && p.fromPuny != nil { err = p.fromPuny(p, u) } if err == nil { @@ -628,16 +662,18 @@ func (p *Profile) validateLabel(s string) error { if p.bidirule != nil && !p.bidirule(s) { return &labelError{s, "B"} } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} + if p.checkHyphens { + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} + if !p.checkJoiners { + return nil } + trie := p.trie // p.checkJoiners is only set if trie is set. // TODO: merge the use of this in the trie. 
v, sz := trie.lookupString(s) x := info(v) diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index 54fddb4b16..d1d62ef459 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 8ce0811fdf..167efba712 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index f39f0cb4cd..ab40f7bcc3 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index e8c7a36d7a..390c5e56d2 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 8b65fa1678..4074b5332e 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package idna diff --git a/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/vendor/golang.org/x/oauth2/authhandler/authhandler.go new file mode 100644 index 0000000000..69967cf87e --- /dev/null +++ b/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package authhandler implements a TokenSource to support +// "three-legged OAuth 2.0" via a custom AuthorizationHandler. +package authhandler + +import ( + "context" + "errors" + + "golang.org/x/oauth2" +) + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts +// the user for OAuth consent at the specified auth code URL +// and returns an auth code and state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// TokenSource returns an oauth2.TokenSource that fetches access tokens +// using 3-legged-OAuth flow. +// +// The provided context.Context is used for oauth2 Exchange operation. +// +// The provided oauth2.Config should be a full configuration containing AuthURL, +// TokenURL, and Scope. +// +// An environment-specific AuthorizationHandler is used to obtain user consent. +// +// Per the OAuth protocol, a unique "state" string should be specified here. 
+// This token source will verify that the "state" is identical in the request +// and response before exchanging the auth code for OAuth token to prevent CSRF +// attacks. +func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state}) +} + +type authHandlerSource struct { + ctx context.Context + config *oauth2.Config + authHandler AuthorizationHandler + state string +} + +func (source authHandlerSource) Token() (*oauth2.Token, error) { + url := source.config.AuthCodeURL(source.state) + code, state, err := source.authHandler(url) + if err != nil { + return nil, err + } + if state != source.state { + return nil, errors.New("state mismatch in 3-legged-OAuth flow") + } + return source.config.Exchange(source.ctx, code) +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 83dacac320..16c6c6b90c 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build appengine // +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index 04c2c2216a..a7e27b3d29 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index ad2c09236c..880dd7b59f 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -16,11 +16,16 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" + "golang.org/x/oauth2/authhandler" ) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials +// Credentials from external accounts (workload identity federation) are used to +// identify a particular application from an on-prem or non-Google Cloud platform +// including Amazon Web Services (AWS), Microsoft Azure or any identity provider +// that supports OpenID Connect (OIDC). type Credentials struct { ProjectID string // may be empty TokenSource oauth2.TokenSource @@ -37,6 +42,32 @@ type Credentials struct { // Deprecated: use Credentials instead. type DefaultCredentials = Credentials +// CredentialsParams holds user supplied parameters that are used together +// with a credentials file for building a Credentials object. +type CredentialsParams struct { + // Scopes is the list OAuth scopes. Required. + // Example: https://www.googleapis.com/auth/cloud-platform + Scopes []string + + // Subject is the user email used for domain wide delegation (see + // https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. 
+ Subject string + + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Optional. + AuthHandler authhandler.AuthorizationHandler + + // State is a unique string used with AuthHandler. Optional. + State string +} + +func (params CredentialsParams) deepCopy() CredentialsParams { + paramsCopy := params + paramsCopy.Scopes = make([]string, len(params.Scopes)) + copy(paramsCopy.Scopes, params.Scopes) + return paramsCopy +} + // DefaultClient returns an HTTP Client that uses the // DefaultTokenSource to obtain authentication credentials. func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { @@ -58,13 +89,17 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc return creds.TokenSource, nil } -// FindDefaultCredentials searches for "Application Default Credentials". +// FindDefaultCredentialsWithParams searches for "Application Default Credentials". // // It looks for credentials in the following places, // preferring the first location found: // // 1. A JSON file whose path is specified by the // GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. // 2. A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. @@ -73,11 +108,14 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // 4. On Google Compute Engine, Google App Engine standard second generation runtimes // (>= Go 1.11), and Google App Engine flexible environment, it fetches // credentials from the metadata server. -func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { +func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scopes) + creds, err := readCredentialsFile(ctx, filename, params) if err != nil { return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) } @@ -86,7 +124,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // Second, try a well-known file. 
filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + if creds, err := readCredentialsFile(ctx, filename, params); err == nil { return creds, nil } else if !os.IsNotExist(err) { return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) @@ -98,7 +136,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials if appengineTokenFunc != nil { return &DefaultCredentials{ ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scopes...), + TokenSource: AppEngineTokenSource(ctx, params.Scopes...), }, nil } @@ -108,7 +146,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials id, _ := metadata.ProjectID() return &DefaultCredentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", scopes...), + TokenSource: ComputeTokenSource("", params.Scopes...), }, nil } @@ -117,16 +155,38 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) } -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). -func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { +// FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return FindDefaultCredentialsWithParams(ctx, params) +} + +// CredentialsFromJSONWithParams obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in ConfigFromJSON), +// a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh +// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud +// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, attempt to parse jsonData as a Google Developers Console client_credentials.json. + config, _ := ConfigFromJSON(jsonData, params.Scopes...) + if config != nil { + return &Credentials{ + ProjectID: "", + TokenSource: authhandler.TokenSource(ctx, config, params.State, params.AuthHandler), + JSON: jsonData, + }, nil + } + + // Otherwise, parse jsonData as one of the other supported credentials files. var f credentialsFile if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } @@ -137,6 +197,13 @@ func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) }, nil } +// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. 
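For illustration only: how the new params-based entry points are meant to be called. The subject e-mail and the key path below are hypothetical.

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	params := google.CredentialsParams{
		Scopes:  []string{"https://www.googleapis.com/auth/cloud-platform"},
		Subject: "user@example.com", // hypothetical account for domain-wide delegation
	}

	// Resolve Application Default Credentials with the extra parameters.
	creds, err := google.FindDefaultCredentialsWithParams(ctx, params)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", creds.ProjectID)

	// The same parameters work when loading an explicit credentials file.
	data, err := ioutil.ReadFile("/path/to/key.json") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	if _, err := google.CredentialsFromJSONWithParams(ctx, data, params); err != nil {
		log.Fatal(err)
	}
}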
+func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return CredentialsFromJSONWithParams(ctx, jsonData, params) +} + func wellKnownFile() string { const f = "application_default_credentials.json" if runtime.GOOS == "windows" { @@ -145,10 +212,10 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*DefaultCredentials, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err } - return CredentialsFromJSON(ctx, b, scopes...) + return CredentialsFromJSONWithParams(ctx, b, params) } diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 73be629033..8e6a57ce96 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -4,13 +4,16 @@ // Package google provides support for making OAuth2 authorized and authenticated // HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. +// credentials, service accounts, Google Compute Engine service accounts, +// Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. // // A brief overview of the package follows. For more information, please read // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // // OAuth2 Configs // @@ -19,6 +22,35 @@ // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // +// Workload Identity Federation +// +// Using workload identity federation, your application can access Google Cloud +// resources from Amazon Web Services (AWS), Microsoft Azure or any identity +// provider that supports OpenID Connect (OIDC). +// Traditionally, applications running outside Google Cloud have used service +// account keys to access Google Cloud resources. Using identity federation, +// you can allow your workload to impersonate a service account. +// This lets you access Google Cloud resources directly, eliminating the +// maintenance and security burden associated with service account keys. +// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws +// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure +// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// +// For OIDC providers, the library can retrieve OIDC tokens either from a +// local file location (file-sourced credentials) or from a local server +// (URL-sourced credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. 
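A minimal sketch of the external refresh loop that file-sourced credentials assume; fetchOIDCToken and the token path are placeholders for whatever the identity provider and credential configuration actually use.

package main

import (
	"io/ioutil"
	"log"
	"time"
)

// fetchOIDCToken stands in for obtaining a fresh ID token from the identity provider.
func fetchOIDCToken() (string, error) {
	return "placeholder-oidc-token", nil
}

func main() {
	const tokenPath = "/var/run/secrets/tokens/oidc-token" // must match credential_source.file
	for {
		tok, err := fetchOIDCToken()
		if err != nil {
			log.Print("refresh failed: ", err)
		} else if err := ioutil.WriteFile(tokenPath, []byte(tok), 0600); err != nil {
			log.Print("write failed: ", err)
		}
		// Refresh well before the one-hour token lifetime mentioned above expires.
		time.Sleep(45 * time.Minute)
	}
}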
The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// // // Credentials // @@ -29,6 +61,13 @@ // FindDefaultCredentials looks in some well-known places for a credentials file, and // will call AppEngineTokenSource or ComputeTokenSource as needed. // +// Application Default Credentials also support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage and +// store service account private keys locally. +// // DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, // then use the credentials to construct an http.Client or an oauth2.TokenSource. // diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 2c8f1bd5ad..2b631f5223 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -19,7 +19,7 @@ import ( "golang.org/x/oauth2/jwt" ) -// Endpoint is Google's OAuth 2.0 endpoint. +// Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://accounts.google.com/o/oauth2/auth", TokenURL: "https://oauth2.googleapis.com/token", @@ -87,7 +87,7 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) } scope = append([]string(nil), scope...) // copy - return f.jwtConfig(scope), nil + return f.jwtConfig(scope, ""), nil } // JSON key file types. @@ -99,12 +99,13 @@ const ( // credentialsFile is the unmarshalled representation of a credentials file. 
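For reference, a file-sourced workload identity federation configuration roughly takes the shape below; every identifier is a placeholder, and real files are generated by gcloud per the how-to guide linked above. Such a file can be fed straight to CredentialsFromJSON:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

const wifConfig = `{
  "type": "external_account",
  "audience": "//iam.googleapis.com/projects/000000000000/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
  "token_url": "https://sts.googleapis.com/v1/token",
  "credential_source": {
    "file": "/var/run/secrets/tokens/oidc-token",
    "format": { "type": "text" }
  }
}`

func main() {
	creds, err := google.CredentialsFromJSON(context.Background(), []byte(wifConfig),
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token source ready:", creds.TokenSource != nil)
}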
type credentialsFile struct { - Type string `json:"type"` // serviceAccountKey or userCredentialsKey + Type string `json:"type"` // Service Account fields ClientEmail string `json:"client_email"` PrivateKeyID string `json:"private_key_id"` PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` TokenURL string `json:"token_uri"` ProjectID string `json:"project_id"` @@ -124,13 +125,14 @@ type credentialsFile struct { QuotaProjectID string `json:"quota_project_id"` } -func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { +func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, PrivateKey: []byte(f.PrivateKey), PrivateKeyID: f.PrivateKeyID, Scopes: scopes, TokenURL: f.TokenURL, + Subject: subject, // This is the user email to impersonate } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL @@ -138,17 +140,27 @@ func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { return cfg } -func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { +func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsParams) (oauth2.TokenSource, error) { switch f.Type { case serviceAccountKey: - cfg := f.jwtConfig(scopes) + cfg := f.jwtConfig(params.Scopes, params.Subject) return cfg.TokenSource(ctx), nil case userCredentialsKey: cfg := &oauth2.Config{ ClientID: f.ClientID, ClientSecret: f.ClientSecret, - Scopes: scopes, - Endpoint: Endpoint, + Scopes: params.Scopes, + Endpoint: oauth2.Endpoint{ + AuthURL: f.AuthURL, + TokenURL: f.TokenURL, + AuthStyle: oauth2.AuthStyleInParams, + }, + } + if cfg.Endpoint.AuthURL == "" { + cfg.Endpoint.AuthURL = Endpoint.AuthURL + } + if cfg.Endpoint.TokenURL == "" { + cfg.Endpoint.TokenURL = Endpoint.TokenURL } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil @@ -163,7 +175,7 @@ func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oau ClientID: f.ClientID, CredentialSource: f.CredentialSource, QuotaProjectID: f.QuotaProjectID, - Scopes: scopes, + Scopes: params.Scopes, } return cfg.TokenSource(ctx), nil case "": diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 906d1fe9d3..a5a5423c65 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -5,41 +5,57 @@ package externalaccount import ( + "bytes" + "context" "crypto/hmac" "crypto/sha256" "encoding/hex" + "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" + "net/url" + "os" "path" "sort" "strings" "time" + + "golang.org/x/oauth2" ) -// RequestSigner is a utility class to sign http requests using a AWS V4 signature. +type awsSecurityCredentials struct { + AccessKeyID string `json:"AccessKeyID"` + SecretAccessKey string `json:"SecretAccessKey"` + SecurityToken string `json:"Token"` +} + +// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. type awsRequestSigner struct { RegionName string - AwsSecurityCredentials map[string]string + AwsSecurityCredentials awsSecurityCredentials } +// getenv aliases os.Getenv for testing +var getenv = os.Getenv + const ( -// AWS Signature Version 4 signing algorithm identifier. + // AWS Signature Version 4 signing algorithm identifier. 
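The signature derivation used by the signer above is the standard SigV4 HMAC chain; stripped of the request canonicalization it amounts to the following standalone sketch (the secret, date and string-to-sign are placeholders):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}

func main() {
	secret := "EXAMPLE-SECRET-ACCESS-KEY"
	dateStamp, region, service := "20210101", "us-east-1", "sts"
	stringToSign := "AWS4-HMAC-SHA256\n20210101T000000Z\nplaceholder-scope\nplaceholder-request-hash"

	// Seed with "AWS4" + secret, then chain HMACs over the date, region, service,
	// the "aws4_request" terminator and finally the string to sign.
	key := []byte("AWS4" + secret)
	for _, part := range []string{dateStamp, region, service, "aws4_request", stringToSign} {
		key = hmacSHA256(key, []byte(part))
	}
	fmt.Println("signature:", hex.EncodeToString(key))
}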
awsAlgorithm = "AWS4-HMAC-SHA256" -// The termination string for the AWS credential scope value as defined in -// https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html awsRequestType = "aws4_request" -// The AWS authorization header name for the security session token if available. + // The AWS authorization header name for the security session token if available. awsSecurityTokenHeader = "x-amz-security-token" -// The AWS authorization header name for the auto-generated date. + // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" - awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -113,7 +129,7 @@ func canonicalHeaders(req *http.Request) (string, string) { } sort.Strings(headers) - var fullHeaders strings.Builder + var fullHeaders bytes.Buffer for _, header := range headers { headerValue := strings.Join(lowerCaseHeaders[header], ",") fullHeaders.WriteString(header) @@ -167,8 +183,8 @@ func (rs *awsRequestSigner) SignRequest(req *http.Request) error { signedRequest.Header.Add("host", requestHost(req)) - if securityToken, ok := rs.AwsSecurityCredentials["security_token"]; ok { - signedRequest.Header.Add(awsSecurityTokenHeader, securityToken) + if rs.AwsSecurityCredentials.SecurityToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) } if signedRequest.Header.Get("date") == "" { @@ -186,15 +202,6 @@ func (rs *awsRequestSigner) SignRequest(req *http.Request) error { } func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { - secretAccessKey, ok := rs.AwsSecurityCredentials["secret_access_key"] - if !ok { - return "", errors.New("oauth2/google: missing secret_access_key header") - } - accessKeyId, ok := rs.AwsSecurityCredentials["access_key_id"] - if !ok { - return "", errors.New("oauth2/google: missing access_key_id header") - } - canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) dateStamp := timestamp.Format(awsTimeFormatShort) @@ -203,28 +210,261 @@ func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp serviceName = splitHost[0] } - credentialScope := fmt.Sprintf("%s/%s/%s/%s",dateStamp, rs.RegionName, serviceName, awsRequestType) + credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) if err != nil { return "", err } requestHash, err := getSha256([]byte(requestString)) - if err != nil{ + if err != nil { return "", err } stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) - signingKey := []byte("AWS4" + secretAccessKey) + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) for _, signingInput := range []string{ dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, } { signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) - if err != nil{ + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +type 
awsCredentialSource struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + CredVerificationURL string + TargetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { + if cs.client == nil { + cs.client = oauth2.NewClient(cs.ctx, nil) + } + return cs.client.Do(req.WithContext(cs.ctx)) +} + +func (cs awsCredentialSource) subjectToken() (string, error) { + if cs.requestSigner == nil { + awsSecurityCredentials, err := cs.getSecurityCredentials() + if err != nil { + return "", err + } + + if cs.region, err = cs.getRegion(); err != nil { return "", err } + + cs.requestSigner = &awsRequestSigner{ + RegionName: cs.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if cs.TargetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) + } + cs.requestSigner.SignRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (cs *awsCredentialSource) getRegion() (string, error) { + if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { + return envAwsRegion, nil + } + if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { + return envAwsRegion, nil + } + + if cs.RegionURL == "" { + return "", errors.New("oauth2/google: unable to determine AWS region") + } + + req, err := http.NewRequest("GET", cs.RegionURL, nil) + if err != nil { + return "", err + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. + respBodyEnd := 0 + if len(respBody) > 1 { + respBodyEnd = len(respBody) - 1 + } + return string(respBody[:respBodyEnd]), nil +} + +func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { + if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { + if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { + return awsSecurityCredentials{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SecurityToken: getenv("AWS_SESSION_TOKEN"), + }, nil + } + } + + roleName, err := cs.getMetadataRoleName() + if err != nil { + return + } + + credentials, err := cs.getMetadataSecurityCredentials(roleName) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("oauth2/google: missing AccessKeyId credential") + } + + if credentials.SecretAccessKey == "" { + return result, errors.New("oauth2/google: missing SecretAccessKey credential") + } + + return credentials, nil +} + +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { + var result awsSecurityCredentials + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) + if err != nil { + return result, err + } + req.Header.Add("Content-Type", "application/json") + + resp, err := cs.doRequest(req) + if err != nil { + return result, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return result, err + } + + if resp.StatusCode != 200 { + return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) + } + + err = json.Unmarshal(respBody, &result) + return result, err +} + +func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { + if cs.CredVerificationURL == "" { 
+ return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") + } + + req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) + if err != nil { + return "", err + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) } - return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, accessKeyId, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil + return string(respBody), nil } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index deb9deb730..a4d45d9202 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -9,23 +9,45 @@ import ( "fmt" "golang.org/x/oauth2" "net/http" + "strconv" "time" ) // now aliases time.Now for testing -var now = time.Now +var now = func() time.Time { + return time.Now().UTC() +} // Config stores the configuration for fetching tokens with external credentials. type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec + // e.g. `urn:ietf:params:oauth:token-type:jwt`. SubjectTokenType string + // TokenURL is the STS token exchange endpoint. TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. ServiceAccountImpersonationURL string + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using client_id as username and client_secret as password. ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. CredentialSource CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project which overrides the project associated with the credentials. QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. 
Scopes []string } @@ -44,7 +66,7 @@ func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { ctx: ctx, url: c.ServiceAccountImpersonationURL, scopes: scopes, - ts: oauth2.ReuseTokenSource(nil, ts), + ts: oauth2.ReuseTokenSource(nil, ts), } return oauth2.ReuseTokenSource(nil, imp) } @@ -63,6 +85,8 @@ type format struct { } // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +// Either the File or the URL field should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. type CredentialSource struct { File string `json:"file"` @@ -77,20 +101,34 @@ type CredentialSource struct { } // parse determines the type of CredentialSource needed -func (c *Config) parse(ctx context.Context) baseCredentialSource { - if c.CredentialSource.File != "" { - return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format} +func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) + } + return awsCredentialSource{ + EnvironmentID: c.CredentialSource.EnvironmentID, + RegionURL: c.CredentialSource.RegionURL, + RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: c.CredentialSource.URL, + TargetResource: c.Audience, + ctx: ctx, + }, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil } else if c.CredentialSource.URL != "" { - return urlCredentialSource{URL: c.CredentialSource.URL, Format: c.CredentialSource.Format, ctx: ctx} + return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil } - return nil + return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } type baseCredentialSource interface { subjectToken() (string, error) } -// tokenSource is the source that handles external credentials. +// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. 
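Tying the documented fields together, a fragment that could live inside the internal externalaccount package itself (the function name and all values are placeholders; external callers go through the google package rather than this internal API):

package externalaccount

import (
	"context"

	"golang.org/x/oauth2"
)

// exampleFileSourcedTokenSource shows how the Config fields documented above
// map onto a file-sourced workload identity credential.
func exampleFileSourcedTokenSource(ctx context.Context) oauth2.TokenSource {
	cfg := Config{
		Audience:         "//iam.googleapis.com/projects/000000000000/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
		TokenURL:         "https://sts.googleapis.com/v1/token",
		CredentialSource: CredentialSource{
			File: "/var/run/secrets/tokens/oidc-token",
		},
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	return cfg.TokenSource(ctx)
}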
type tokenSource struct { ctx context.Context conf *Config @@ -100,15 +138,16 @@ type tokenSource struct { func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf - credSource := conf.parse(ts.ctx) - if credSource == nil { - return nil, fmt.Errorf("oauth2/google: unable to parse credential source") + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err } subjectToken, err := credSource.subjectToken() + if err != nil { return nil, err } - stsRequest := STSTokenExchangeRequest{ + stsRequest := stsTokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -118,12 +157,12 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := ClientAuthentication{ + clientAuth := clientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, } - stsResp, err := ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, nil) + stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, nil) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go index 0464724fd8..62c2e36cc1 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go @@ -11,15 +11,18 @@ import ( "net/url" ) -// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type ClientAuthentication struct { +// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type clientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string ClientSecret string } -func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +// InjectAuthentication is used to add authentication to a Secure Token Service exchange +// request. It modifies either the passed url.Values or http.Header depending on the desired +// authentication format. +func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 1d29c467f7..1f6009b38f 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -36,7 +36,7 @@ type impersonateTokenSource struct { scopes []string } -// Token performs the exchange to get a temporary service account +// Token performs the exchange to get a temporary service account token to allow access to GCP. 
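A standalone re-implementation sketch of the two authentication styles InjectAuthentication supports (it is not the internal type itself), to make the header-versus-body distinction concrete; the client ID and secret are placeholders.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"

	"golang.org/x/oauth2"
)

// inject mirrors what the internal clientAuthentication does for rfc6749 section 2.3.1.
func inject(style oauth2.AuthStyle, clientID, clientSecret string, values url.Values, headers http.Header) {
	switch style {
	case oauth2.AuthStyleInHeader:
		// HTTP Basic auth: base64("id:secret") in the Authorization header.
		creds := base64.StdEncoding.EncodeToString([]byte(clientID + ":" + clientSecret))
		headers.Set("Authorization", "Basic "+creds)
	case oauth2.AuthStyleInParams:
		// Request-body auth: client_id/client_secret as extra form fields.
		values.Set("client_id", clientID)
		values.Set("client_secret", clientSecret)
	}
}

func main() {
	values, headers := url.Values{}, http.Header{}
	inject(oauth2.AuthStyleInHeader, "example-id", "example-secret", values, headers)
	fmt.Println(headers.Get("Authorization"))
}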
func (its impersonateTokenSource) Token() (*oauth2.Token, error) { reqBody := generateAccessTokenReq{ Lifetime: "3600s", diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go index c7d85a3c20..a8a704bb41 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strconv" @@ -17,11 +18,11 @@ import ( "golang.org/x/oauth2" ) -// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +// exchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*STSTokenExchangeResponse, error) { +func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { client := oauth2.NewClient(ctx, nil) @@ -32,11 +33,13 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan data.Set("subject_token_type", request.SubjectTokenType) data.Set("subject_token", request.SubjectToken) data.Set("scope", strings.Join(request.Scope, " ")) - opts, err := json.Marshal(options) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) + if options != nil { + opts, err := json.Marshal(options) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) + } + data.Set("options", string(opts)) } - data.Set("options", string(opts)) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() @@ -61,9 +64,12 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan } defer resp.Body.Close() - bodyJson := json.NewDecoder(io.LimitReader(resp.Body, 1<<20)) - var stsResp STSTokenExchangeResponse - err = bodyJson.Decode(&stsResp) + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + var stsResp stsTokenExchangeResponse + err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -72,8 +78,8 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan return &stsResp, nil } -// STSTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type STSTokenExchangeRequest struct { +// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type stsTokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -87,8 +93,8 @@ type STSTokenExchangeRequest struct { SubjectTokenType string } -// STSTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. 
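Boiled down, the request exchangeToken sends is a form-encoded POST of the token-exchange grant. The sketch below uses placeholder values; the requested_token_type URN is the standard RFC 8693 access-token type rather than something defined in this diff.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	data := url.Values{}
	data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange")
	data.Set("audience", "//iam.googleapis.com/projects/000000000000/locations/global/workloadIdentityPools/my-pool/providers/my-provider")
	data.Set("scope", "https://www.googleapis.com/auth/cloud-platform")
	data.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token")
	data.Set("subject_token_type", "urn:ietf:params:oauth:token-type:jwt")
	data.Set("subject_token", "placeholder-oidc-token")

	req, err := http.NewRequest("POST", "https://sts.googleapis.com/v1/token", strings.NewReader(data.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// A real caller would now send req with an *http.Client and decode the JSON
	// response into the access/issued token fields shown above.
	fmt.Println("POST", req.URL, "with", len(data.Encode()), "bytes of form data")
}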
-type STSTokenExchangeResponse struct { +// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. +type stsTokenExchangeResponse struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go index b0d5d35e75..91b8f2002a 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -39,15 +39,18 @@ func (cs urlCredentialSource) subjectToken() (string, error) { } defer resp.Body.Close() - tokenBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody) + } switch cs.Format.Type { case "json": jsonData := make(map[string]interface{}) - err = json.Unmarshal(tokenBytes, &jsonData) + err = json.Unmarshal(respBody, &jsonData) if err != nil { return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) } @@ -61,9 +64,9 @@ func (cs urlCredentialSource) subjectToken() (string, error) { } return token, nil case "text": - return string(tokenBytes), nil + return string(respBody), nil case "": - return string(tokenBytes), nil + return string(respBody), nil default: return "", errors.New("oauth2/google: invalid credential_source file format type") } diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index b0fdb3a888..67d97b9904 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -7,6 +7,7 @@ package google import ( "crypto/rsa" "fmt" + "strings" "time" "golang.org/x/oauth2" @@ -24,6 +25,28 @@ import ( // optimization supported by a few Google services. // Unless you know otherwise, you should use JWTConfigFromJSON instead. func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, audience, nil) +} + +// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The scope is typically a list of URLs that specifies the scope of the +// credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
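Usage of the new scope-based variant introduced below looks like this sketch; the key path is hypothetical.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	jsonKey, err := ioutil.ReadFile("/path/to/service-account.json") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	// Self-signed JWT "access token" bound to a scope instead of a single audience.
	ts, err := google.JWTAccessTokenSourceWithScope(jsonKey,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.TokenType, "expires:", tok.Expiry)
}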
+func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, "", scope) +} + +func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { + if len(scopes) == 0 && audience == "" { + return nil, fmt.Errorf("google: missing scope/audience for JWT access token") + } + cfg, err := JWTConfigFromJSON(jsonKey) if err != nil { return nil, fmt.Errorf("google: could not parse JSON key: %v", err) @@ -35,6 +58,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token ts := &jwtAccessTokenSource{ email: cfg.Email, audience: audience, + scopes: scopes, pk: pk, pkID: cfg.PrivateKeyID, } @@ -47,6 +71,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token type jwtAccessTokenSource struct { email, audience string + scopes []string pk *rsa.PrivateKey pkID string } @@ -54,12 +79,14 @@ type jwtAccessTokenSource struct { func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { iat := time.Now() exp := iat.Add(time.Hour) + scope := strings.Join(ts.scopes, " ") cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), } hdr := &jws.Header{ Algorithm: "RS256", diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index 7434871880..e1755d1d9a 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 579d2d7355..474efad0e0 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -76,7 +76,7 @@ arguments can be passed to the kernel. The third is for low-level use by the ForkExec wrapper. Unlike the first two, it does not call into the scheduler to let it know that a system call is running. -When porting Go to an new architecture/OS, this file must be implemented for +When porting Go to a new architecture/OS, this file must be implemented for each GOOS/GOARCH pair. ### mksysnum @@ -107,7 +107,7 @@ prototype can be exported (capitalized) or not. Adding a new syscall often just requires adding a new `//sys` function prototype with the desired arguments and a capitalized name so it is exported. However, if you want the interface to the syscall to be different, often one will make an -unexported `//sys` prototype, an then write a custom wrapper in +unexported `//sys` prototype, and then write a custom wrapper in `syscall_${GOOS}.go`. ### types files @@ -137,7 +137,7 @@ some `#if/#elif` macros in your include statements. This script is used to generate the system's various constants. This doesn't just include the error numbers and error strings, but also the signal numbers -an a wide variety of miscellaneous constants. The constants come from the list +and a wide variety of miscellaneous constants. The constants come from the list of include files in the `includes_${uname}` variable. A regex then picks out the desired `#define` statements, and generates the corresponding Go constants. 
The error numbers and strings are generated from `#include `, and the diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index 951fce4d0d..abc89c104a 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index 6b4027b33f..db9171c2e4 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s similarity index 72% rename from vendor/golang.org/x/sys/unix/asm_netbsd_386.s rename to vendor/golang.org/x/sys/unix/asm_bsd_386.s index ae7b498d50..e0fcd9b3de 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for 386, NetBSD -// +// System call support for 386 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. @@ -22,7 +22,7 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-40 TEXT ·Syscall9(SB),NOSPLIT,$0-52 JMP syscall·Syscall9(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 JMP syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s similarity index 72% rename from vendor/golang.org/x/sys/unix/asm_darwin_amd64.s rename to vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index f2397fde55..2b99c349a2 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc +// +build darwin dragonfly freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for AMD64, Darwin -// +// System call support for AMD64 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
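Since every x/sys file touched here gains the new //go:build syntax alongside the legacy // +build lines, an illustrative file header (not part of the diff) constrained the same way as the BSD assembly stubs would read:

//go:build (freebsd || netbsd || openbsd) && gc
// +build freebsd netbsd openbsd
// +build gc

package sketch

// Nothing more is needed: gofmt on Go 1.17+ keeps the //go:build and // +build
// forms in sync, while older toolchains that predate //go:build still honor
// the legacy lines.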
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s similarity index 76% rename from vendor/golang.org/x/sys/unix/asm_darwin_arm.s rename to vendor/golang.org/x/sys/unix/asm_bsd_arm.s index c9e6b6fc8b..d702d4adc7 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -1,15 +1,14 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc -// +build arm,darwin #include "textflag.h" -// -// System call support for ARM, Darwin -// +// System call support for ARM BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s similarity index 75% rename from vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s rename to vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index e57367c17a..fe36a7391a 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for AMD64, NetBSD -// +// System call support for ARM64 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s deleted file mode 100644 index 8a06b87d71..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s deleted file mode 100644 index 89843f8f4b..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc -// +build arm64,darwin - -#include "textflag.h" - -// -// System call support for AMD64, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-56 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s deleted file mode 100644 index 27674e1caf..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, DragonFly -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s deleted file mode 100644 index 49f0ac2364..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s deleted file mode 100644 index f2dfc57b83..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s deleted file mode 100644 index 6d740db2c0..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s deleted file mode 100644 index a8f5a29b35..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM64, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 0655ecbfbb..8fd101d071 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index bc3fb6ac3e..7ed38e43c6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 55b13c7ba4..8ef1d51402 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 22a83d8e3f..98ae02760d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux && arm64 && gc // +build linux // +build arm64 // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index dc222b90ce..21231d2ce1 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips64 || mips64le) && gc // +build linux // +build mips64 mips64le // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index d333f13cff..6783b26c60 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips || mipsle) && gc // +build linux // +build mips mipsle // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 459a629c27..19d4989344 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64 || ppc64le) && gc // +build linux // +build ppc64 ppc64le // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 04d38497c6..e42eb81d58 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -2,7 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build riscv64,gc +//go:build riscv64 && gc +// +build riscv64 +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index cc303989e1..c46aab3395 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -2,8 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x +//go:build linux && s390x && gc // +build linux +// +build s390x // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s deleted file mode 100644 index d7da175e1a..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s deleted file mode 100644 index e7cbe1904c..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM64, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s deleted file mode 100644 index 2f00b0310f..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s deleted file mode 100644 index 07632c99ce..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s deleted file mode 100644 index 73e997320f..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s deleted file mode 100644 index c47302aa46..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for arm64, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 47c93fcb6c..5e7a1169c0 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index 1f2c755a72..f8c5394c1a 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s new file mode 100644 index 0000000000..3b54e18581 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -0,0 +1,426 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc +// +build zos +// +build s390x +// +build gc + +#include "textflag.h" + +#define PSALAA 1208(R0) +#define GTAB64(x) 80(x) +#define LCA64(x) 88(x) +#define CAA(x) 8(x) +#define EDCHPXV(x) 1016(x) // in the CAA +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA + +// SS_*, where x=SAVSTACK_ASYNC +#define SS_LE(x) 0(x) +#define SS_GO(x) 8(x) +#define SS_ERRNO(x) 16(x) +#define SS_ERRNOJR(x) 20(x) + +#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 + +TEXT ·clearErrno(SB),NOSPLIT,$0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) + RET + +// Returns the address of errno in R3. +TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get __errno FuncDesc. 
+ MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(0x156*16), R9 + LMG 0(R9), R5, R6 + + // Switch to saved LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call __errno function. + LE_CALL + NOPH + + // Switch back to Go stack. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + RET + +TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + RET + +TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+56(FP) + MOVD R0, r2+64(FP) + MOVD R0, err+72(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+72(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
+ + MOVD R3, r1+56(FP) + MOVD R0, r2+64(FP) + MOVD R0, err+72(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+72(FP) +done: + RET + +TEXT ·syscall_syscall9(SB),NOSPLIT,$0 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
+ + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + RET + +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB),NOSPLIT,$0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) + + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 + + BYTE $0x0D // Branch to function + BYTE $0xEF + + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 + + RET + +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + BYTE $0x0A // SVC 08 LOAD + BYTE $0x08 + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, addr+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + BYTE $0x0A // SVC 09 DELETE + BYTE $0x09 + MOVD R2, R15 // Restore go stack pointer + +error: + MOVD $0, addr+8(FP) // Return 0 on failure +done: + XOR R0, R0 // Reset r0 to 0 + RET + +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD addr+8(FP), R15 + BYTE $0x0A // SVC 09 + BYTE $0x09 + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, rc+0(FP) // Return SVC return code + RET + +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD 0x3D0(R9), R9 + MOVD R9, ret+0(FP) + + RET diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index df52048773..0b7c6adb86 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd // +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 3a6ac648dd..394a3965b6 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 5e5fb45104..65a998508d 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc +//go:build aix && ppc +// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8b401244c4..8fc08ad0aa 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc64 +//go:build aix && ppc64 +// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go new file mode 100644 index 0000000000..a388e59a0e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Functions to access/create device major and minor numbers matching the +// encoding used by z/OS. +// +// The information below is extracted and adapted from macros. + +package unix + +// Major returns the major component of a z/OS device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 16) & 0x0000FFFF) +} + +// Minor returns the minor component of a z/OS device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0x0000FFFF) +} + +// Mkdev returns a z/OS device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 16) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 304016b688..e74e5eaa3b 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 86781eac22..a520265576 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 // +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 8822d8541f..4362f47e2c 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh // +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 84178b0a13..29ccc4d133 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go new file mode 100644 index 0000000000..cedaf7e024 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -0,0 +1,221 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "sync" +) + +// This file simulates epoll on z/OS using poll. + +// Analogous to epoll_event on Linux. +// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + EPOLLERR = 0x8 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + // The following constants are part of the epoll API, but represent + // currently unsupported functionality on z/OS. + // EPOLL_CLOEXEC = 0x80000 + // EPOLLET = 0x80000000 + // EPOLLONESHOT = 0x40000000 + // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis + // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode + // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability +) + +// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL +// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). + +// epToPollEvt converts epoll event field to poll equivalent. +// In epoll, Events is a 32-bit field, while poll uses 16 bits. 
+func epToPollEvt(events uint32) int16 { + var ep2p = map[uint32]int16{ + EPOLLIN: POLLIN, + EPOLLOUT: POLLOUT, + EPOLLHUP: POLLHUP, + EPOLLPRI: POLLPRI, + EPOLLERR: POLLERR, + } + + var pollEvts int16 = 0 + for epEvt, pEvt := range ep2p { + if (events & epEvt) != 0 { + pollEvts |= pEvt + } + } + + return pollEvts +} + +// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. +func pToEpollEvt(revents int16) uint32 { + var p2ep = map[int16]uint32{ + POLLIN: EPOLLIN, + POLLOUT: EPOLLOUT, + POLLHUP: EPOLLHUP, + POLLPRI: EPOLLPRI, + POLLERR: EPOLLERR, + } + + var epollEvts uint32 = 0 + for pEvt, epEvt := range p2ep { + if (revents & pEvt) != 0 { + epollEvts |= epEvt + } + } + + return epollEvts +} + +// Per-process epoll implementation. +type epollImpl struct { + mu sync.Mutex + epfd2ep map[int]*eventPoll + nextEpfd int +} + +// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. +// On Linux, this is an in-kernel data structure accessed through a fd. +type eventPoll struct { + mu sync.Mutex + fds map[int]*EpollEvent +} + +// epoll impl for this process. +var impl epollImpl = epollImpl{ + epfd2ep: make(map[int]*eventPoll), + nextEpfd: 0, +} + +func (e *epollImpl) epollcreate(size int) (epfd int, err error) { + e.mu.Lock() + defer e.mu.Unlock() + epfd = e.nextEpfd + e.nextEpfd++ + + e.epfd2ep[epfd] = &eventPoll{ + fds: make(map[int]*EpollEvent), + } + return epfd, nil +} + +func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { + return e.epollcreate(4) +} + +func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { + e.mu.Lock() + defer e.mu.Unlock() + + ep, ok := e.epfd2ep[epfd] + if !ok { + + return EBADF + } + + switch op { + case EPOLL_CTL_ADD: + // TODO(neeilan): When we make epfds and fds disjoint, detect epoll + // loops here (instances watching each other) and return ELOOP. 
+ if _, ok := ep.fds[fd]; ok { + return EEXIST + } + ep.fds[fd] = event + case EPOLL_CTL_MOD: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + ep.fds[fd] = event + case EPOLL_CTL_DEL: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + delete(ep.fds, fd) + + } + return nil +} + +// Must be called while holding ep.mu +func (ep *eventPoll) getFds() []int { + fds := make([]int, len(ep.fds)) + for fd := range ep.fds { + fds = append(fds, fd) + } + return fds +} + +func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { + e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait + ep, ok := e.epfd2ep[epfd] + + if !ok { + e.mu.Unlock() + return 0, EBADF + } + + pollfds := make([]PollFd, 4) + for fd, epollevt := range ep.fds { + pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) + } + e.mu.Unlock() + + n, err = Poll(pollfds, msec) + if err != nil { + return n, err + } + + i := 0 + for _, pFd := range pollfds { + if pFd.Revents != 0 { + events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} + i++ + } + + if i == n { + break + } + } + + return n, nil +} + +func EpollCreate(size int) (fd int, err error) { + return impl.epollcreate(size) +} + +func EpollCreate1(flag int) (fd int, err error) { + return impl.epollcreate1(flag) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return impl.epollctl(epfd, op, fd, event) +} + +// Because EpollWait mutates events, the caller is expected to coordinate +// concurrent access if calling with the same epfd from multiple goroutines. +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return impl.epollwait(epfd, events, msec) +} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 4dc5348643..e9b991258c 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build dragonfly || freebsd || linux || netbsd || openbsd // +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 8db48e5e06..29d44808b1 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,386 linux,arm linux,mips linux,mipsle +//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) +// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index b27be0a014..a8068f94f2 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go new file mode 100644 index 0000000000..e377cc9f49 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -0,0 +1,164 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + var stat_v Statvfs_t + err = Fstatvfs(fd, &stat_v) + if err == nil { + // populate stat + stat.Type = 0 + stat.Bsize = stat_v.Bsize + stat.Blocks = stat_v.Blocks + stat.Bfree = stat_v.Bfree + stat.Bavail = stat_v.Bavail + stat.Files = stat_v.Files + stat.Ffree = stat_v.Ffree + stat.Fsid = stat_v.Fsid + stat.Namelen = stat_v.Namemax + stat.Frsize = stat_v.Frsize + stat.Flags = stat_v.Flag + for passn := 0; passn < 5; passn++ { + switch passn { + case 0: + err = tryGetmntent64(stat) + break + case 1: + err = tryGetmntent128(stat) + break + case 2: + err = tryGetmntent256(stat) + break + case 3: + err = tryGetmntent512(stat) + break + case 4: + err = tryGetmntent1024(stat) + break + default: + break + } + //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) + if err == nil || err != nil && err != ERANGE { + break + } + } + } + return err +} + +func tryGetmntent64(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [64]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent128(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [128]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent256(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [256]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent512(stat *Statfs_t) 
(err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [512]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent1024(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [1024]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 86032c11ef..0dee23222c 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +//go:build gccgo && !aix +// +build gccgo,!aix package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index 251a977a81..e60e49a3d9 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gccgo && linux && amd64 // +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 5641678613..6c7ad052e6 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go new file mode 100644 index 0000000000..48773f730a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -0,0 +1,196 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "runtime" + "unsafe" +) + +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. 
+func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + +func IoctlGetUint32(fd int, req uint) (uint32, error) { + var value uint32 + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetRTCTime(fd int) (*RTCTime, error) { + var value RTCTime + err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlSetRTCTime(fd int, value *RTCTime) error { + err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { + var value RTCWkAlrm + err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { + err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +type ifreqEthtool struct { + name [IFNAMSIZ]byte + data unsafe.Pointer +} + +// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network +// device specified by ifname. +func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { + // Leave room for terminating NULL byte. + if len(ifname) >= IFNAMSIZ { + return nil, EINVAL + } + + value := EthtoolDrvinfo{ + Cmd: ETHTOOL_GDRVINFO, + } + ifreq := ifreqEthtool{ + data: unsafe.Pointer(&value), + } + copy(ifreq.name[:], ifname) + err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq))) + runtime.KeepAlive(ifreq) + return &value, err +} + +// IoctlGetWatchdogInfo fetches information about a watchdog device from the +// Linux watchdog API. For more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { + var value WatchdogInfo + err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For +// more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlWatchdogKeepalive(fd int) error { + return ioctl(fd, WDIOC_KEEPALIVE, 0) +} + +// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the +// range of data conveyed in value to the file associated with the file +// descriptor destFd. See the ioctl_ficlonerange(2) man page for details. +func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { + err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file +// associated with the file description srcFd to the file associated with the +// file descriptor destFd. See the ioctl_ficlone(2) man page for details. +func IoctlFileClone(destFd, srcFd int) error { + return ioctl(destFd, FICLONE, uintptr(srcFd)) +} + +type FileDedupeRange struct { + Src_offset uint64 + Src_length uint64 + Reserved1 uint16 + Reserved2 uint32 + Info []FileDedupeRangeInfo +} + +type FileDedupeRangeInfo struct { + Dest_fd int64 + Dest_offset uint64 + Bytes_deduped uint64 + Status int32 + Reserved uint32 +} + +// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the +// range of data conveyed in value from the file associated with the file +// descriptor srcFd to the value.Info destinations. 
See the +// ioctl_fideduperange(2) man page for details. +func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { + buf := make([]byte, SizeofRawFileDedupeRange+ + len(value.Info)*SizeofRawFileDedupeRangeInfo) + rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0])) + rawrange.Src_offset = value.Src_offset + rawrange.Src_length = value.Src_length + rawrange.Dest_count = uint16(len(value.Info)) + rawrange.Reserved1 = value.Reserved1 + rawrange.Reserved2 = value.Reserved2 + + for i := range value.Info { + rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer( + uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) + + uintptr(i*SizeofRawFileDedupeRangeInfo))) + rawinfo.Dest_fd = value.Info[i].Dest_fd + rawinfo.Dest_offset = value.Info[i].Dest_offset + rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped + rawinfo.Status = value.Info[i].Status + rawinfo.Reserved = value.Info[i].Reserved + } + + err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0]))) + + // Output + for i := range value.Info { + rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer( + uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) + + uintptr(i*SizeofRawFileDedupeRangeInfo))) + value.Info[i].Dest_fd = rawinfo.Dest_fd + value.Info[i].Dest_offset = rawinfo.Dest_offset + value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped + value.Info[i].Status = rawinfo.Status + value.Info[i].Reserved = rawinfo.Reserved + } + + return err +} + +func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error { + err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) { + var value HIDRawDevInfo + err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlHIDGetRawName(fd int) (string, error) { + var value [_HIDIOCGRAWNAME_LEN]byte + err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + +func IoctlHIDGetRawPhys(fd int) (string, error) { + var value [_HIDIOCGRAWPHYS_LEN]byte + err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + +func IoctlHIDGetRawUniq(fd int) (string, error) { + var value [_HIDIOCGRAWUNIQ_LEN]byte + err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go new file mode 100644 index 0000000000..5384e7d91d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -0,0 +1,74 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. 
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value is expected to be TCSETS, TCSETSW, or TCSETSF +func IoctlSetTermios(fd int, req uint, value *Termios) error { + if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { + return ENOSYS + } + err := Tcsetattr(fd, int(req), value) + runtime.KeepAlive(value) + return err +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +// IoctlGetTermios performs an ioctl on fd with a *Termios. +// +// The req value is expected to be TCGETS +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + if req != TCGETS { + return &value, ENOSYS + } + err := Tcgetattr(fd, &value) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index d257fac505..396aadf86d 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -70,23 +70,11 @@ aix_ppc64) mksyscall="go run mksyscall_aix_ppc64.go -aix" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; -darwin_386) - mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" - ;; darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; -darwin_arm) - mkerrors="$mkerrors" - mksyscall="go run mksyscall.go -l32" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" - ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" @@ -199,7 +187,7 @@ illumos_amd64) mksyscall="go run mksyscall_solaris.go" mkerrors= mksysnum= - mktypes= + mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; *) echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index b8313e98af..c4e3e1eeee 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -56,6 +56,7 @@ includes_Darwin=' #define _DARWIN_C_SOURCE #define KERNEL #define _DARWIN_USE_64_BIT_INODE +#define __APPLE_USE_RFC_3542 #include #include #include @@ -65,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -114,6 +116,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -213,6 +216,8 @@ struct ltchars { #include #include #include +#include +#include #include #include #include @@ -223,6 +228,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -252,6 +258,7 @@ struct ltchars { #include #include +#include #include #if defined(__sparc__) @@ -299,6 +306,17 @@ struct ltchars { // Including linux/l2tp.h here causes conflicts 
between linux/in.h // and netinet/in.h included via net/route.h above. #define IPPROTO_L2TP 115 + +// Copied from linux/hid.h. +// Keep in sync with the size of the referenced fields. +#define _HIDIOCGRAWNAME_LEN 128 // sizeof_field(struct hid_device, name) +#define _HIDIOCGRAWPHYS_LEN 64 // sizeof_field(struct hid_device, phys) +#define _HIDIOCGRAWUNIQ_LEN 64 // sizeof_field(struct hid_device, uniq) + +#define _HIDIOCGRAWNAME HIDIOCGRAWNAME(_HIDIOCGRAWNAME_LEN) +#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) +#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) + ' includes_NetBSD=' @@ -388,10 +406,11 @@ includes_SunOS=' #include #include #include +#include #include -#include #include #include +#include ' @@ -446,6 +465,8 @@ ccflags="$@" $2 !~ /^EPROC_/ && $2 !~ /^EQUIV_/ && $2 !~ /^EXPR_/ && + $2 !~ /^EVIOC/ && + $2 !~ /^EV_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || $2 ~ /^(OLD|NEW)DEV$/ || @@ -480,10 +501,10 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || - $2 == "ICMPV6_FILTER" || + $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || $2 == "SOMAXCONN" || $2 == "NAME_MAX" || $2 == "IFNAMSIZ" || @@ -570,6 +591,12 @@ ccflags="$@" $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || $2 ~ /^FAN_|FANOTIFY_/ || + $2 == "HID_MAX_DESCRIPTOR_SIZE" || + $2 ~ /^_?HIDIOC/ || + $2 ~ /^BUS_(USB|HIL|BLUETOOTH|VIRTUAL)$/ || + $2 ~ /^MTD/ || + $2 ~ /^OTP/ || + $2 ~ /^MEM/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -607,6 +634,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo +echo "//go:build ${GOARCH} && ${GOOS}" echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index bc2f3629a7..53f1b4c5b8 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index fc568b5403..463c3eff7f 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin && !ios // +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index 183441c9a5..ed0509a011 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ios // +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 61712b51c9..6f6c5fec5a 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin && race) || (linux && race) || (freebsd && race) // +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index ad026678c7..706e1322ae 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly +//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos +// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 3a90aa6dfa..4d6257569e 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd // +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 5fdae40b3a..2a4ba47c45 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 003916ed7a..453a942c5d 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 57a0021da5..0840fe4a57 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin freebsd linux netbsd openbsd solaris +//go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix @@ -36,6 +37,10 @@ func cmsgAlignOf(salen int) int { if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { salign = 16 } + case "zos": + // z/OS socket macros use [32-bit] sizeof(int) alignment, + // not pointer width. + salign = SizeofInt } return (salen + salign - 1) & ^(salign - 1) diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go index 17fb698683..8ba89ed869 100644 --- a/vendor/golang.org/x/sys/unix/str.go +++ b/vendor/golang.org/x/sys/unix/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index ab75ef9cc6..649fa87405 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 4408153822..d8efb715ff 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix // +build aix // Aix system calls. 
@@ -251,7 +252,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -419,8 +420,8 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) -//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64 -//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64 +//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) @@ -439,8 +440,8 @@ func (w WaitStatus) TrapCause() int { return -1 } //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) //sysnb Uname(buf *Utsname) (err error) -//sys Unlink(path string) (err error) -//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) //sys readlen(fd int, p *byte, np int) (n int, err error) = read @@ -514,7 +515,7 @@ func Munmap(b []byte) (err error) { //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) -//sysnb pipe(p *[2]_C_int) (err error) +//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index b3c8e3301c..e92a0be163 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc +//go:build aix && ppc +// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 9a6e024179..16eed17098 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc64 +//go:build aix && ppc64 +// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index bc634a280a..95ac3946b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd // +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems @@ -318,7 +319,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { return anyToSockaddr(fd, &rsa) } -//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. @@ -332,8 +333,8 @@ func GetsockoptString(fd, level, opt int) (string, error) { return string(buf[:vallen-1]), nil } -//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) -//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { @@ -626,7 +627,7 @@ func Futimes(fd int, tv []Timeval) error { return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { if len(fds) == 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go index b31ef03588..b0098607c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.12 && !go1.13 // +build darwin,go1.12,!go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index ee852f1abc..1596426b1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin && go1.13 // +build darwin,go1.13 package unix @@ -16,7 +17,7 @@ import ( //sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) func fdopendir(fd int) (dir uintptr, err error) { - r0, _, e1 := syscall_syscallPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) dir = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,7 +25,7 @@ func fdopendir(fd int) (dir uintptr, err error) { return } -func libc_fdopendir_trampoline() +var libc_fdopendir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 16f9c226b9..9945e5f965 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -119,7 +119,7 @@ type attrList struct { Forkattr uint32 } -//sysnb pipe(p *[2]int32) (err error) +//sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -272,7 +272,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { options) } -//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) +//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { // Darwin doesn't support SYS_UTIMENSAT @@ -320,7 +320,7 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return err } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} @@ -378,6 +378,26 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { + var value IPMreqn + vallen := _Socklen(SizeofIPMreqn) + errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, errno +} + +func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) +} + +// GetsockoptXucred is a getsockopt wrapper that returns an Xucred struct. +// The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. 
+func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { + x := new(Xucred) + vallen := _Socklen(SizeofXucred) + err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) + return x, err +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) /* @@ -472,8 +492,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) -//sys munmap(addr uintptr, length uintptr) (err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go deleted file mode 100644 index ee065fcf2d..0000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386,darwin - -package unix - -import "syscall" - -func setTimespec(sec, nsec int64) Timespec { - return Timespec{Sec: int32(sec), Nsec: int32(nsec)} -} - -func setTimeval(sec, usec int64) Timeval { - return Timeval{Sec: int32(sec), Usec: int32(usec)} -} - -func SetKevent(k *Kevent_t, fd, mode, flags int) { - k.Ident = uint32(fd) - k.Filter = int16(mode) - k.Flags = uint16(flags) -} - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint32(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint32(length) -} - -func (msghdr *Msghdr) SetIovlen(length int) { - msghdr.Iovlen = int32(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint32(length) -} - -func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 -//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 -//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 -//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 -//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 7a1f64a7b6..b37310ce9b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go deleted file mode 100644 index d30735c5d6..0000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix - -import "syscall" - -func ptrace1(request int, pid int, addr uintptr, data uintptr) error { - return ENOTSUP -} - -func setTimespec(sec, nsec int64) Timespec { - return Timespec{Sec: int32(sec), Nsec: int32(nsec)} -} - -func setTimeval(sec, usec int64) Timeval { - return Timeval{Sec: int32(sec), Usec: int32(usec)} -} - -func SetKevent(k *Kevent_t, fd, mode, flags int) { - k.Ident = uint32(fd) - k.Filter = int16(mode) - k.Flags = uint16(flags) -} - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint32(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint32(length) -} - -func (msghdr *Msghdr) SetIovlen(length int) { - msghdr.Iovlen = int32(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint32(length) -} - -func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic - -//sys Fstat(fd int, stat *Stat_t) (err error) -//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) -//sys Fstatfs(fd int, stat *Statfs_t) (err error) -//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT -//sys Lstat(path string, stat *Stat_t) (err error) -//sys Stat(path string, stat *Stat_t) (err error) -//sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 9f85fd4046..d51ec99630 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index f34c86c899..53c96641f8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.12 // +build darwin,go1.12 package unix -import "unsafe" +import _ "unsafe" // Implemented in the runtime package (runtime/sys_darwin.go) func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) @@ -24,10 +25,3 @@ func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) //go:linkname syscall_rawSyscall syscall.rawSyscall //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 //go:linkname syscall_syscallPtr syscall.syscallPtr - -// Find the entry point for f. See comments in runtime/proc.go for the -// function of the same name. 
-//go:nosplit -func funcPC(f func()) uintptr { - return **(**uintptr)(unsafe.Pointer(&f)) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a4f2944a24..5af108a503 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -95,7 +95,7 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (r int, w int, err error) +//sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -105,16 +105,16 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (r int, w int, err error) -func Pipe2(p []int, flags int) error { +func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { return EINVAL } var pp [2]_C_int - err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + // pipe2 on dragonfly takes an fds array as an argument, but still + // returns the file descriptors. + p[0], p[1], err = pipe2(&pp, flags) return err } @@ -170,7 +170,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) @@ -337,8 +337,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) -//sys munmap(addr uintptr, length uintptr) (err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index a6b4830ac8..4e2d32120a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index acc00c2e6a..18c392cf36 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -126,6 +126,15 @@ func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } +// GetsockoptXucred is a getsockopt wrapper that returns an Xucred struct. +// The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. 
+func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { + x := new(Xucred) + vallen := _Socklen(SizeofXucred) + err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) + return x, err +} + func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -188,9 +197,9 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { return ENOSYS } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} @@ -665,8 +674,8 @@ func PtraceSingleStep(pid int) (err error) { //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) -//sys munmap(addr uintptr, length uintptr) (err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 72a506ddcb..342fc32b16 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && freebsd // +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index d5e376acae..a32d5aa4ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && freebsd // +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 4ea45bce52..1e36d39abe 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && freebsd // +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index aa5326db19..a09a1537bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
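The GetsockoptXucred helper added above for darwin and freebsd returns the credentials of the peer on a Unix-domain socket. A minimal usage sketch, assuming the SOL_LOCAL and LOCAL_PEERCRED constants that its doc comment suggests and a connected socketpair (illustration only, not part of the vendored files):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Connected pair of Unix-domain stream sockets (darwin/freebsd only).
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Ask for the credentials of the process on the other end of fds[0].
	// SOL_LOCAL and LOCAL_PEERCRED are the level and option the wrapper's
	// doc comment names as the usual values.
	cred, err := unix.GetsockoptXucred(fds[0], unix.SOL_LOCAL, unix.LOCAL_PEERCRED)
	if err != nil {
		panic(err)
	}
	fmt.Printf("peer uid=%d, %d groups\n", cred.Uid, cred.Ngroups)
}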
+//go:build arm64 && freebsd // +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 7a2d4120fc..8c53576835 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -1,14 +1,19 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // illumos system calls not present on Solaris. +//go:build amd64 && illumos // +build amd64,illumos package unix -import "unsafe" +import ( + "fmt" + "runtime" + "unsafe" +) func bytes2iovec(bs [][]byte) []Iovec { iovecs := make([]Iovec, len(bs)) @@ -75,3 +80,99 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { } return } + +//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) + +func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Len: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Len: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + return putmsg(fd, clp, datap, flags) +} + +//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) + +func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Maxlen: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Maxlen: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + + if err = getmsg(fd, clp, datap, &flags); err != nil { + return nil, nil, 0, err + } + + if len(cl) > 0 { + retCl = cl[:clp.Len] + } + if len(data) > 0 { + retData = data[:datap.Len] + } + return retCl, retData, flags, nil +} + +func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { + return ioctlRet(fd, req, uintptr(arg)) +} + +func IoctlSetString(fd int, req uint, val string) error { + bs := make([]byte, len(val)+1) + copy(bs[:len(bs)-1], val) + err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) + runtime.KeepAlive(&bs[0]) + return err +} + +// Lifreq Helpers + +func (l *Lifreq) SetName(name string) error { + if len(name) >= len(l.Name) { + return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) + } + for i := range name { + l.Name[i] = int8(name[i]) + } + return nil +} + +func (l *Lifreq) SetLifruInt(d int) { + *(*int)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruInt() int { + return *(*int)(unsafe.Pointer(&l.Lifru[0])) +} + +func IoctlLifreq(fd int, req uint, l *Lifreq) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(l))) +} + +// Strioctl Helpers + +func (s *Strioctl) SetInt(i int) { + s.Len = int32(unsafe.Sizeof(i)) + s.Dp = (*int8)(unsafe.Pointer(&i)) +} + +func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { + return ioctlRet(fd, req, uintptr(unsafe.Pointer(s))) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 28be1306ec..2dd7c8e34a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -70,88 +70,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl itself should not be exposed directly, but 
additional get/set // functions for specific types are permissible. - -// IoctlRetInt performs an ioctl operation specified by req on a device -// associated with opened file descriptor fd, and returns a non-negative -// integer that is returned by the ioctl syscall. -func IoctlRetInt(fd int, req uint) (int, error) { - ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) - if err != 0 { - return 0, err - } - return int(ret), nil -} - -func IoctlSetRTCTime(fd int, value *RTCTime) error { - err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { - err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -func IoctlGetUint32(fd int, req uint) (uint32, error) { - var value uint32 - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetRTCTime(fd int) (*RTCTime, error) { - var value RTCTime - err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -// IoctlGetWatchdogInfo fetches information about a watchdog device from the -// Linux watchdog API. For more information, see: -// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. -func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { - var value WatchdogInfo - err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { - var value RTCWkAlrm - err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the -// range of data conveyed in value to the file associated with the file -// descriptor destFd. See the ioctl_ficlonerange(2) man page for details. -func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { - err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file -// associated with the file description srcFd to the file associated with the -// file descriptor destFd. See the ioctl_ficlone(2) man page for details. -func IoctlFileClone(destFd, srcFd int) error { - return ioctl(destFd, FICLONE, uintptr(srcFd)) -} - -// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the -// range of data conveyed in value with the file associated with the file -// descriptor destFd. See the ioctl_fideduperange(2) man page for details. -func IoctlFileDedupeRange(destFd int, value *FileDedupeRange) error { - err := ioctl(destFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For -// more information, see: -// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. -func IoctlWatchdogKeepalive(fd int) error { - return ioctl(fd, WDIOC_KEEPALIVE, 0) -} +// These are defined in ioctl.go and ioctl_linux.go. //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) @@ -778,16 +697,19 @@ type SockaddrVM struct { // CID and Port specify a context ID and port address for a VM socket. // Guests have a unique CID, and hosts may have a well-known CID of: // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. + // - VMADDR_CID_LOCAL: refers to local communication (loopback). 
// - VMADDR_CID_HOST: refers to other processes on the host. - CID uint32 - Port uint32 - raw RawSockaddrVM + CID uint32 + Port uint32 + Flags uint8 + raw RawSockaddrVM } func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_VSOCK sa.raw.Port = sa.Port sa.raw.Cid = sa.CID + sa.raw.Flags = sa.Flags return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } @@ -1092,8 +1014,9 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { case AF_VSOCK: pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) sa := &SockaddrVM{ - CID: pp.Cid, - Port: pp.Port, + CID: pp.Cid, + Port: pp.Port, + Flags: pp.Flags, } return sa, nil case AF_BLUETOOTH: @@ -1228,7 +1151,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { func Accept(fd int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny - nfd, err = accept(fd, &rsa, &len) + // Try accept4 first for Android, then try accept for kernel older than 2.6.28 + nfd, err = accept4(fd, &rsa, &len, 0) + if err == ENOSYS { + nfd, err = accept(fd, &rsa, &len) + } if err != nil { return } @@ -1482,8 +1409,8 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction) } -//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL -//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr @@ -1798,6 +1725,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) +//sys CloseRange(first uint, last uint, flags uint) (err error) //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) @@ -1860,8 +1788,8 @@ func Getpgrp() (pid int) { //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 -//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) @@ -1934,9 +1862,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) -//sysnb TimerfdCreate(clockid 
int, flags int) (fd int, err error) -//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) -//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) +//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) +//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) +//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) @@ -2196,8 +2124,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { return EACCES } -//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT -//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT +//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT +//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT // fileHandle is the argument to nameToHandleAt and openByHandleAt. We // originally tried to generate it via unix/linux/types.go with "type diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c97c2ee53e..7b52e5d8a4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && linux // +build 386,linux package unix @@ -31,7 +32,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -98,7 +99,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) @@ -129,7 +130,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, rlim, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 72efe86ed4..28b7641152 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && linux // +build amd64,linux package unix @@ -138,7 +139,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index baa771f8ad..8b0f0f3aa5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,linux -// +build gc +//go:build amd64 && linux && gc +// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 496837b1e3..68877728ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && linux // +build arm,linux package unix @@ -35,7 +36,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -129,8 +130,8 @@ func Utime(path string, buf *Utimbuf) error { //sys utimes(path string, times *[2]Timeval) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 @@ -177,7 +178,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) @@ -208,7 +209,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, rlim, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index c6de6b9134..7ed7034761 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm64 && linux // +build arm64,linux package unix @@ -155,7 +156,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 9edf3961b0..2b1168d7d1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gc // +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 90e33d8cf7..9843fb4896 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gc && 386 // +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 1a97baae73..a6008fccd5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && gc && linux // +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 308eb7aecf..7740af2428 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gccgo && 386 // +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index aa7fc9e199..e16a12299a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gccgo && arm // +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index f0287476cd..06dec06fa1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux && (mips64 || mips64le) // +build linux // +build mips64 mips64le @@ -104,7 +105,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index c11328111d..8f0d0a5b59 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips || mipsle) // +build linux // +build mips mipsle @@ -112,7 +113,7 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -125,7 +126,7 @@ func Pipe2(p []int, flags int) (err error) { return } -//sysnb pipe() (p1 int, p2 int, err error) +//sysnb pipe() (p1 int, p2 int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -153,7 +154,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT func Getrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, nil, rlim) @@ -181,7 +182,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, rlim, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go new file mode 100644 index 0000000000..7e65e088d2 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -0,0 +1,272 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && ppc +// +build linux +// +build ppc + +package unix + +import ( + "syscall" + "unsafe" +) + +//sys dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) +//sys Utime(path string, buf 
*Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64_64, uintptr(fd), uintptr(advice), uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func seek(fd int, offset int64, whence int) (int64, syscall.Errno) { + var newoffset int64 + offsetLow := uint32(offset & 0xffffffff) + offsetHigh := uint32((offset >> 32) & 0xffffffff) + _, _, err := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + return newoffset, err +} + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, errno := seek(fd, offset, whence) + if errno != 0 { + return 0, errno + } + return newoffset, nil +} + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + return setrlimit(resource, &rl) +} + +func (r *PtraceRegs) PC() uint32 { return r.Nip } + +func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func 
(cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + +//sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 + +func SyncFileRange(fd int, off int64, n int64, flags int) error { + // The sync_file_range and sync_file_range2 syscalls differ only in the + // order of their arguments. + return syncFileRange2(fd, flags, off, n) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 349374409b..0b1f0d6da5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le @@ -99,7 +100,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } -//sysnb pipe(p *[2]_C_int) (err error) +//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -112,7 +113,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index b0b1505565..ce9bcd3171 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build riscv64 && linux // +build riscv64,linux package unix @@ -154,7 +155,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2363f74991..a1e45694b4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build s390x && linux // +build s390x,linux package unix @@ -76,7 +77,7 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -249,7 +250,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen } func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error { - args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)} + args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen} _, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0) if err != 0 { return err diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index d389f1518f..49055a3cf5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build sparc64 && linux // +build sparc64,linux package unix @@ -115,7 +116,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } -//sysnb pipe(p *[2]_C_int) (err error) +//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -128,7 +129,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 1e6843b4c3..853d5f0f43 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,7 +110,8 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (fd1 int, fd2 int, err error) +//sysnb pipe() (fd1 int, fd2 int, err error) + func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL @@ -119,7 +120,21 @@ func Pipe(p []int) (err error) { return } -//sys Getdents(fd int, buf []byte) (n int, err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} + +//sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -159,7 +174,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 24da8b5245..5199d282fd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 25a0ac8258..70a9c52e98 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 21591ecd4d..3eb5942f93 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index 8047496350..fc6ccfd810 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6a50b50bd6..22b5503850 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -92,7 +92,7 @@ func Pipe2(p []int, flags int) error { return err } -//sys Getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -154,7 +154,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 42b5a0e51e..6baabcdcb0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 6ea4b48831..bab25360ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 1c3d26fa2c..8eed3c4d4e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index a8c458cb03..483dde99d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 184786ed99..77fcde7c18 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -565,7 +565,12 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, err = ioctlRet(fd, req, arg) + return err +} func IoctlSetTermio(fd int, req uint, value *Termio) error { err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) @@ -579,7 +584,7 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { return &value, err } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { if len(fds) == 0 { @@ -682,6 +687,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Statvfs(path string, vfsstat *Statvfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Sync() (err error) +//sys Sysconf(which int) (n int64, err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sys Truncate(path string, length int64) (err error) //sys Fsync(fd int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index b22a34d7ae..0bd25ef81f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && solaris // +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 400ba9fbc9..a7618ceb55 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 87bd161cef..5898e9a52b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -2,8 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 // +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build gc,!ppc64le,!ppc64 +// +build gc +// +build !ppc64le +// +build !ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index d36216c3ca..f6f707acf2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux && (ppc64le || ppc64) && gc // +build linux // +build ppc64le ppc64 // +build gc diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go new file mode 100644 index 0000000000..1ffd8bfcfb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -0,0 +1,1829 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "bytes" + "runtime" + "sort" + "sync" + "syscall" + "unsafe" +) + +const ( + O_CLOEXEC = 0 // Dummy value (not supported). + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX +) + +func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) + +func copyStat(stat *Stat_t, statLE *Stat_LE_t) { + stat.Dev = uint64(statLE.Dev) + stat.Ino = uint64(statLE.Ino) + stat.Nlink = uint64(statLE.Nlink) + stat.Mode = uint32(statLE.Mode) + stat.Uid = uint32(statLE.Uid) + stat.Gid = uint32(statLE.Gid) + stat.Rdev = uint64(statLE.Rdev) + stat.Size = statLE.Size + stat.Atim.Sec = int64(statLE.Atim) + stat.Atim.Nsec = 0 //zos doesn't return nanoseconds + stat.Mtim.Sec = int64(statLE.Mtim) + stat.Mtim.Nsec = 0 //zos doesn't return nanoseconds + stat.Ctim.Sec = int64(statLE.Ctim) + stat.Ctim.Nsec = 0 //zos doesn't return nanoseconds + stat.Blksize = int64(statLE.Blksize) + stat.Blocks = statLE.Blocks +} + +func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +func svcLoad(name *byte) unsafe.Pointer +func svcUnload(name *byte, fnptr unsafe.Pointer) int64 + +func (d *Dirent) NameString() string { + if d == nil { + return "" + } + return string(d.Name[:d.Namlen]) +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet4 + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet6 + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) || n == 0 { + return nil, 0, EINVAL + } + sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + 
+func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { + // TODO(neeilan): Implement use of first param (fd) + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + // For z/OS, only replace NUL with @ when the + // length is not zero. + if pp.Len != 0 && pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // + // For z/OS, the length of the name is a field + // in the structure. To be on the safe side, we + // will still scan the name for a NUL but only + // to the length provided in the structure. + // + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. + n := 0 + for n < int(pp.Len) && pp.Path[n] != 0 { + n++ + } + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = int32(length) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys write(fd int, p []byte) (n int, err error) + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, 
fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP +//sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL + +//sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A +//sys Chdir(path string) (err error) = SYS___CHDIR_A +//sys Chown(path string, uid int, gid int) (err error) = SYS___CHOWN_A +//sys Chmod(path string, mode uint32) (err error) = SYS___CHMOD_A +//sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A +//sys Dup(oldfd int) (fd int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Errno2() (er2 int) = SYS___ERRNO2 +//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys fstat(fd int, stat *Stat_LE_t) (err error) + +func Fstat(fd int, stat *Stat_t) (err error) { + var statLE Stat_LE_t + err = fstat(fd, &statLE) + copyStat(stat, &statLE) + return +} + +//sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT +//sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL +//sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES +//sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT +//sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A + +//sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A +//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys Chroot(path string) (err error) = SYS___CHROOT_A +//sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT +//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A + +func Ptsname(fd int) (name string, err error) { + r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) + name = u2s(unsafe.Pointer(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func u2s(cstr unsafe.Pointer) string { + str := (*[1024]uint8)(cstr) + i := 0 + for str[i] != 0 { + i++ + } + return string(str[:i]) +} + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + for i := 0; e1 == EAGAIN && i < 10; i++ { + _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) + _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + } + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +// Dummy function: there are no semantics for Madvise on z/OS +func Madvise(b []byte, advice int) (err error) { + return +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) 
{ + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpid() (pid int) +//sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID + +func Getpgrp() (pid int) { + pid, _ = Getpgid(0) + return +} + +//sysnb Getppid() (pid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_GETRLIMIT + +//sysnb getrusage(who int, rusage *rusage_zos) (err error) = SYS_GETRUSAGE + +func Getrusage(who int, rusage *Rusage) (err error) { + var ruz rusage_zos + err = getrusage(who, &ruz) + //Only the first two fields of Rusage are set + rusage.Utime.Sec = ruz.Utime.Sec + rusage.Utime.Usec = int64(ruz.Utime.Usec) + rusage.Stime.Sec = ruz.Stime.Sec + rusage.Stime.Usec = int64(ruz.Stime.Usec) + return +} + +//sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID +//sysnb Getuid() (uid int) +//sysnb Kill(pid int, sig Signal) (err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A +//sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Listen(s int, n int) (err error) +//sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A + +func Lstat(path string, stat *Stat_t) (err error) { + var statLE Stat_LE_t + err = lstat(path, &statLE) + copyStat(stat, &statLE) + return +} + +//sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A +//sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Rmdir(path string) (err error) = SYS___RMDIR_A +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID +//sysnb Setrlimit(resource int, lim *Rlimit) (err error) +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID +//sysnb Setsid() (pid int, err error) = SYS_SETSID +//sys Setuid(uid int) (err error) = SYS_SETUID +//sys Setgid(uid int) (err error) = SYS_SETGID +//sys Shutdown(fd int, how int) (err error) +//sys stat(path string, statLE *Stat_LE_t) (err error) = SYS___STAT_A + +func Stat(path string, sta *Stat_t) (err error) { + var statLE Stat_LE_t + err = stat(path, &statLE) + copyStat(sta, &statLE) + return +} + +//sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Sync() = SYS_SYNC +//sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A +//sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR +//sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR +//sys Umask(mask int) (oldmask int) +//sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Utime(path string, utim *Utimbuf) (err error) = SYS___UTIME_A + +//sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A + +func Open(path string, mode int, perm uint32) (fd int, err error) { + return open(path, mode, 
perm) +} + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + wd, err := Getwd() + if err != nil { + return err + } + + if err := Fchdir(dirfd); err != nil { + return err + } + defer Chdir(wd) + + return Mkfifo(path, mode) +} + +//sys remove(path string) (err error) + +func Remove(path string) error { + return remove(path) +} + +const ImplementsGetwd = true + +func Getcwd(buf []byte) (n int, err error) { + var p unsafe.Pointer + if len(buf) > 0 { + p = unsafe.Pointer(&buf[0]) + } else { + p = unsafe.Pointer(&_zero) + } + _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + n = clen(buf) + 1 + if e != 0 { + err = errnoErr(e) + } + return +} + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + n, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + // Getcwd returns the number of bytes written to buf, including the NUL. + if n < 1 || n > len(buf) || buf[n-1] != 0 { + return "", EINVAL + } + return string(buf[0 : n-1]), nil +} + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 1<<16 on Linux. + if n < 0 || n > 1<<20 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func gettid() uint64 + +func Gettid() (tid int) { + return int(gettid()) +} + +type WaitStatus uint32 + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the +// "continued" status is 0xFFFF, distinguishing itself +// from stopped via the core dump bit. + +const ( + mask = 0x7F + core = 0x80 + exited = 0x00 + stopped = 0x7F + shift = 8 +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited } + +func (w WaitStatus) Stopped() bool { return w&0xFF == stopped } + +func (w WaitStatus) Continued() bool { return w == 0xFFFF } + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int(w>>shift) & 0xFF +} + +func (w WaitStatus) Signal() Signal { + if !w.Signaled() { + return -1 + } + return Signal(w & mask) +} + +func (w WaitStatus) StopSignal() Signal { + if !w.Stopped() { + return -1 + } + return Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + // TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. + // At the moment rusage will not be touched. 
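+	// waitpid fills status with the raw wait value; the WaitStatus methods
+	// defined above decode it (for example, ExitStatus() returns
+	// (status>>8)&0xFF once the low 7 bits indicate a normal exit).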
+ var status _C_int + wpid, err = waitpid(pid, &status, options) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +//sysnb gettimeofday(tv *timeval_zos) (err error) + +func Gettimeofday(tv *Timeval) (err error) { + var tvz timeval_zos + err = gettimeofday(&tvz) + tv.Sec = tvz.Sec + tv.Usec = int64(tvz.Usec) + return +} + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + err = Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { //fix + return Timeval{Sec: sec, Usec: usec} +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNano(path string, ts []Timespec) error { + if len(ts) != 2 { + return EINVAL + } + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := [2]Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + // TODO(neeilan) : Remove this 0 ( added to get sys/unix compiling on z/OS ) + return anyToSockaddr(0, &rsa) +} + +const ( + // identifier constants + nwmHeaderIdentifier = 0xd5e6d4c8 + nwmFilterIdentifier = 0xd5e6d4c6 + nwmTCPConnIdentifier = 0xd5e6d4c3 + nwmRecHeaderIdentifier = 0xd5e6d4d9 + nwmIPStatsIdentifier = 0xd5e6d4c9d7e2e340 + nwmIPGStatsIdentifier = 0xd5e6d4c9d7c7e2e3 + nwmTCPStatsIdentifier = 0xd5e6d4e3c3d7e2e3 + nwmUDPStatsIdentifier = 0xd5e6d4e4c4d7e2e3 + nwmICMPGStatsEntry = 0xd5e6d4c9c3d4d7c7 + nwmICMPTStatsEntry = 0xd5e6d4c9c3d4d7e3 + + // nwmHeader constants + nwmVersion1 = 1 + nwmVersion2 = 2 + nwmCurrentVer = 2 + + nwmTCPConnType = 1 + nwmGlobalStatsType = 14 + + // nwmFilter constants + nwmFilterLclAddrMask = 0x20000000 // Local address + nwmFilterSrcAddrMask = 0x20000000 // Source address + nwmFilterLclPortMask = 0x10000000 // Local port + nwmFilterSrcPortMask = 0x10000000 // Source port + + // nwmConnEntry constants + nwmTCPStateClosed = 1 + nwmTCPStateListen = 2 + nwmTCPStateSynSent = 3 + nwmTCPStateSynRcvd = 4 + nwmTCPStateEstab = 5 + nwmTCPStateFinWait1 = 6 + nwmTCPStateFinWait2 = 7 + nwmTCPStateClosWait = 8 + nwmTCPStateLastAck = 9 + nwmTCPStateClosing = 10 + nwmTCPStateTimeWait = 11 + nwmTCPStateDeletTCB = 12 + + // Existing constants on linux + BPF_TCP_CLOSE = 1 + BPF_TCP_LISTEN = 2 + BPF_TCP_SYN_SENT = 3 + BPF_TCP_SYN_RECV = 4 + BPF_TCP_ESTABLISHED = 5 + BPF_TCP_FIN_WAIT1 = 6 + BPF_TCP_FIN_WAIT2 = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_CLOSING = 10 + BPF_TCP_TIME_WAIT = 11 + BPF_TCP_NEW_SYN_RECV = -1 + BPF_TCP_MAX_STATES = -2 +) + +type nwmTriplet struct { + offset uint32 + length uint32 + number uint32 +} + +type nwmQuadruplet struct { + offset uint32 + length uint32 + number uint32 + match uint32 +} + +type nwmHeader struct { + 
ident uint32 + length uint32 + version uint16 + nwmType uint16 + bytesNeeded uint32 + options uint32 + _ [16]byte + inputDesc nwmTriplet + outputDesc nwmQuadruplet +} + +type nwmFilter struct { + ident uint32 + flags uint32 + resourceName [8]byte + resourceId uint32 + listenerId uint32 + local [28]byte // union of sockaddr4 and sockaddr6 + remote [28]byte // union of sockaddr4 and sockaddr6 + _ uint16 + _ uint16 + asid uint16 + _ [2]byte + tnLuName [8]byte + tnMonGrp uint32 + tnAppl [8]byte + applData [40]byte + nInterface [16]byte + dVipa [16]byte + dVipaPfx uint16 + dVipaPort uint16 + dVipaFamily byte + _ [3]byte + destXCF [16]byte + destXCFPfx uint16 + destXCFFamily byte + _ [1]byte + targIP [16]byte + targIPPfx uint16 + targIPFamily byte + _ [1]byte + _ [20]byte +} + +type nwmRecHeader struct { + ident uint32 + length uint32 + number byte + _ [3]byte +} + +type nwmTCPStatsEntry struct { + ident uint64 + currEstab uint32 + activeOpened uint32 + passiveOpened uint32 + connClosed uint32 + estabResets uint32 + attemptFails uint32 + passiveDrops uint32 + timeWaitReused uint32 + inSegs uint64 + predictAck uint32 + predictData uint32 + inDupAck uint32 + inBadSum uint32 + inBadLen uint32 + inShort uint32 + inDiscOldTime uint32 + inAllBeforeWin uint32 + inSomeBeforeWin uint32 + inAllAfterWin uint32 + inSomeAfterWin uint32 + inOutOfOrder uint32 + inAfterClose uint32 + inWinProbes uint32 + inWinUpdates uint32 + outWinUpdates uint32 + outSegs uint64 + outDelayAcks uint32 + outRsts uint32 + retransSegs uint32 + retransTimeouts uint32 + retransDrops uint32 + pmtuRetrans uint32 + pmtuErrors uint32 + outWinProbes uint32 + probeDrops uint32 + keepAliveProbes uint32 + keepAliveDrops uint32 + finwait2Drops uint32 + acceptCount uint64 + inBulkQSegs uint64 + inDiscards uint64 + connFloods uint32 + connStalls uint32 + cfgEphemDef uint16 + ephemInUse uint16 + ephemHiWater uint16 + flags byte + _ [1]byte + ephemExhaust uint32 + smcRCurrEstabLnks uint32 + smcRLnkActTimeOut uint32 + smcRActLnkOpened uint32 + smcRPasLnkOpened uint32 + smcRLnksClosed uint32 + smcRCurrEstab uint32 + smcRActiveOpened uint32 + smcRPassiveOpened uint32 + smcRConnClosed uint32 + smcRInSegs uint64 + smcROutSegs uint64 + smcRInRsts uint32 + smcROutRsts uint32 + smcDCurrEstabLnks uint32 + smcDActLnkOpened uint32 + smcDPasLnkOpened uint32 + smcDLnksClosed uint32 + smcDCurrEstab uint32 + smcDActiveOpened uint32 + smcDPassiveOpened uint32 + smcDConnClosed uint32 + smcDInSegs uint64 + smcDOutSegs uint64 + smcDInRsts uint32 + smcDOutRsts uint32 +} + +type nwmConnEntry struct { + ident uint32 + local [28]byte // union of sockaddr4 and sockaddr6 + remote [28]byte // union of sockaddr4 and sockaddr6 + startTime [8]byte // uint64, changed to prevent padding from being inserted + lastActivity [8]byte // uint64 + bytesIn [8]byte // uint64 + bytesOut [8]byte // uint64 + inSegs [8]byte // uint64 + outSegs [8]byte // uint64 + state uint16 + activeOpen byte + flag01 byte + outBuffered uint32 + inBuffered uint32 + maxSndWnd uint32 + reXmtCount uint32 + congestionWnd uint32 + ssThresh uint32 + roundTripTime uint32 + roundTripVar uint32 + sendMSS uint32 + sndWnd uint32 + rcvBufSize uint32 + sndBufSize uint32 + outOfOrderCount uint32 + lcl0WindowCount uint32 + rmt0WindowCount uint32 + dupacks uint32 + flag02 byte + sockOpt6Cont byte + asid uint16 + resourceName [8]byte + resourceId uint32 + subtask uint32 + sockOpt byte + sockOpt6 byte + clusterConnFlag byte + proto byte + targetAppl [8]byte + luName [8]byte + clientUserId [8]byte + logMode [8]byte + 
timeStamp uint32 + timeStampAge uint32 + serverResourceId uint32 + intfName [16]byte + ttlsStatPol byte + ttlsStatConn byte + ttlsSSLProt uint16 + ttlsNegCiph [2]byte + ttlsSecType byte + ttlsFIPS140Mode byte + ttlsUserID [8]byte + applData [40]byte + inOldestTime [8]byte // uint64 + outOldestTime [8]byte // uint64 + tcpTrustedPartner byte + _ [3]byte + bulkDataIntfName [16]byte + ttlsNegCiph4 [4]byte + smcReason uint32 + lclSMCLinkId uint32 + rmtSMCLinkId uint32 + smcStatus byte + smcFlags byte + _ [2]byte + rcvWnd uint32 + lclSMCBufSz uint32 + rmtSMCBufSz uint32 + ttlsSessID [32]byte + ttlsSessIDLen int16 + _ [1]byte + smcDStatus byte + smcDReason uint32 +} + +var svcNameTable [][]byte = [][]byte{ + []byte("\xc5\xe9\xc2\xd5\xd4\xc9\xc6\xf4"), // svc_EZBNMIF4 +} + +const ( + svc_EZBNMIF4 = 0 +) + +func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { + jobname := []byte("\x5c\x40\x40\x40\x40\x40\x40\x40") // "*" + responseBuffer := [4096]byte{0} + var bufferAlet, reasonCode uint32 = 0, 0 + var bufferLen, returnValue, returnCode int32 = 4096, 0, 0 + + dsa := [18]uint64{0} + var argv [7]unsafe.Pointer + argv[0] = unsafe.Pointer(&jobname[0]) + argv[1] = unsafe.Pointer(&responseBuffer[0]) + argv[2] = unsafe.Pointer(&bufferAlet) + argv[3] = unsafe.Pointer(&bufferLen) + argv[4] = unsafe.Pointer(&returnValue) + argv[5] = unsafe.Pointer(&returnCode) + argv[6] = unsafe.Pointer(&reasonCode) + + request := (*struct { + header nwmHeader + filter nwmFilter + })(unsafe.Pointer(&responseBuffer[0])) + + EZBNMIF4 := svcLoad(&svcNameTable[svc_EZBNMIF4][0]) + if EZBNMIF4 == nil { + return nil, errnoErr(EINVAL) + } + + // GetGlobalStats EZBNMIF4 call + request.header.ident = nwmHeaderIdentifier + request.header.length = uint32(unsafe.Sizeof(request.header)) + request.header.version = nwmCurrentVer + request.header.nwmType = nwmGlobalStatsType + request.header.options = 0x80000000 + + svcCall(EZBNMIF4, &argv[0], &dsa[0]) + + // outputDesc field is filled by EZBNMIF4 on success + if returnCode != 0 || request.header.outputDesc.offset == 0 { + return nil, errnoErr(EINVAL) + } + + // Check that EZBNMIF4 returned a nwmRecHeader + recHeader := (*nwmRecHeader)(unsafe.Pointer(&responseBuffer[request.header.outputDesc.offset])) + if recHeader.ident != nwmRecHeaderIdentifier { + return nil, errnoErr(EINVAL) + } + + // Parse nwmTriplets to get offsets of returned entries + var sections []*uint64 + var sectionDesc *nwmTriplet = (*nwmTriplet)(unsafe.Pointer(&responseBuffer[0])) + for i := uint32(0); i < uint32(recHeader.number); i++ { + offset := request.header.outputDesc.offset + uint32(unsafe.Sizeof(*recHeader)) + i*uint32(unsafe.Sizeof(*sectionDesc)) + sectionDesc = (*nwmTriplet)(unsafe.Pointer(&responseBuffer[offset])) + for j := uint32(0); j < sectionDesc.number; j++ { + offset = request.header.outputDesc.offset + sectionDesc.offset + j*sectionDesc.length + sections = append(sections, (*uint64)(unsafe.Pointer(&responseBuffer[offset]))) + } + } + + // Find nwmTCPStatsEntry in returned entries + var tcpStats *nwmTCPStatsEntry = nil + for _, ptr := range sections { + switch *ptr { + case nwmTCPStatsIdentifier: + if tcpStats != nil { + return nil, errnoErr(EINVAL) + } + tcpStats = (*nwmTCPStatsEntry)(unsafe.Pointer(ptr)) + case nwmIPStatsIdentifier: + case nwmIPGStatsIdentifier: + case nwmUDPStatsIdentifier: + case nwmICMPGStatsEntry: + case nwmICMPTStatsEntry: + default: + return nil, errnoErr(EINVAL) + } + } + if tcpStats == nil { + return nil, errnoErr(EINVAL) + } + + // GetConnectionDetail EZBNMIF4 call + 
responseBuffer = [4096]byte{0} + dsa = [18]uint64{0} + bufferAlet, reasonCode = 0, 0 + bufferLen, returnValue, returnCode = 4096, 0, 0 + nameptr := (*uint32)(unsafe.Pointer(uintptr(0x21c))) // Get jobname of current process + nameptr = (*uint32)(unsafe.Pointer(uintptr(*nameptr + 12))) + argv[0] = unsafe.Pointer(uintptr(*nameptr)) + + request.header.ident = nwmHeaderIdentifier + request.header.length = uint32(unsafe.Sizeof(request.header)) + request.header.version = nwmCurrentVer + request.header.nwmType = nwmTCPConnType + request.header.options = 0x80000000 + + request.filter.ident = nwmFilterIdentifier + + var localSockaddr RawSockaddrAny + socklen := _Socklen(SizeofSockaddrAny) + err := getsockname(fd, &localSockaddr, &socklen) + if err != nil { + return nil, errnoErr(EINVAL) + } + if localSockaddr.Addr.Family == AF_INET { + localSockaddr := (*RawSockaddrInet4)(unsafe.Pointer(&localSockaddr.Addr)) + localSockFilter := (*RawSockaddrInet4)(unsafe.Pointer(&request.filter.local[0])) + localSockFilter.Family = AF_INET + var i int + for i = 0; i < 4; i++ { + if localSockaddr.Addr[i] != 0 { + break + } + } + if i != 4 { + request.filter.flags |= nwmFilterLclAddrMask + for i = 0; i < 4; i++ { + localSockFilter.Addr[i] = localSockaddr.Addr[i] + } + } + if localSockaddr.Port != 0 { + request.filter.flags |= nwmFilterLclPortMask + localSockFilter.Port = localSockaddr.Port + } + } else if localSockaddr.Addr.Family == AF_INET6 { + localSockaddr := (*RawSockaddrInet6)(unsafe.Pointer(&localSockaddr.Addr)) + localSockFilter := (*RawSockaddrInet6)(unsafe.Pointer(&request.filter.local[0])) + localSockFilter.Family = AF_INET6 + var i int + for i = 0; i < 16; i++ { + if localSockaddr.Addr[i] != 0 { + break + } + } + if i != 16 { + request.filter.flags |= nwmFilterLclAddrMask + for i = 0; i < 16; i++ { + localSockFilter.Addr[i] = localSockaddr.Addr[i] + } + } + if localSockaddr.Port != 0 { + request.filter.flags |= nwmFilterLclPortMask + localSockFilter.Port = localSockaddr.Port + } + } + + svcCall(EZBNMIF4, &argv[0], &dsa[0]) + + // outputDesc field is filled by EZBNMIF4 on success + if returnCode != 0 || request.header.outputDesc.offset == 0 { + return nil, errnoErr(EINVAL) + } + + // Check that EZBNMIF4 returned a nwmConnEntry + conn := (*nwmConnEntry)(unsafe.Pointer(&responseBuffer[request.header.outputDesc.offset])) + if conn.ident != nwmTCPConnIdentifier { + return nil, errnoErr(EINVAL) + } + + // Copy data from the returned data structures into tcpInfo + // Stats from nwmConnEntry are specific to that connection. + // Stats from nwmTCPStatsEntry are global (to the interface?) + // Fields may not be an exact match. Some fields have no equivalent. 
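+	// Fields annotated "dummy" below are either zeroed or reuse the closest
+	// available z/OS value, since Linux's TCPInfo has no direct counterpart.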
+ var tcpinfo TCPInfo + tcpinfo.State = uint8(conn.state) + tcpinfo.Ca_state = 0 // dummy + tcpinfo.Retransmits = uint8(tcpStats.retransSegs) + tcpinfo.Probes = uint8(tcpStats.outWinProbes) + tcpinfo.Backoff = 0 // dummy + tcpinfo.Options = 0 // dummy + tcpinfo.Rto = tcpStats.retransTimeouts + tcpinfo.Ato = tcpStats.outDelayAcks + tcpinfo.Snd_mss = conn.sendMSS + tcpinfo.Rcv_mss = conn.sendMSS // dummy + tcpinfo.Unacked = 0 // dummy + tcpinfo.Sacked = 0 // dummy + tcpinfo.Lost = 0 // dummy + tcpinfo.Retrans = conn.reXmtCount + tcpinfo.Fackets = 0 // dummy + tcpinfo.Last_data_sent = uint32(*(*uint64)(unsafe.Pointer(&conn.lastActivity[0]))) + tcpinfo.Last_ack_sent = uint32(*(*uint64)(unsafe.Pointer(&conn.outOldestTime[0]))) + tcpinfo.Last_data_recv = uint32(*(*uint64)(unsafe.Pointer(&conn.inOldestTime[0]))) + tcpinfo.Last_ack_recv = uint32(*(*uint64)(unsafe.Pointer(&conn.inOldestTime[0]))) + tcpinfo.Pmtu = conn.sendMSS // dummy, NWMIfRouteMtu is a candidate + tcpinfo.Rcv_ssthresh = conn.ssThresh + tcpinfo.Rtt = conn.roundTripTime + tcpinfo.Rttvar = conn.roundTripVar + tcpinfo.Snd_ssthresh = conn.ssThresh // dummy + tcpinfo.Snd_cwnd = conn.congestionWnd + tcpinfo.Advmss = conn.sendMSS // dummy + tcpinfo.Reordering = 0 // dummy + tcpinfo.Rcv_rtt = conn.roundTripTime // dummy + tcpinfo.Rcv_space = conn.sendMSS // dummy + tcpinfo.Total_retrans = conn.reXmtCount + + svcUnload(&svcNameTable[svc_EZBNMIF4][0], EZBNMIF4) + + return &tcpinfo, nil +} + +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + + return string(buf[:vallen-1]), nil +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = SizeofSockaddrAny + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + // TODO(neeilan): Remove 0 arg added to get this compiling on z/OS + from, err = anyToSockaddr(0, &rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + var err error + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = int32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + 
msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err + } + dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) + runtime.KeepAlive(unsafe.Pointer(p)) + if e != 0 { + err = errnoErr(e) + } + return dir, err +} + +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + +func Readdir(dir uintptr) (*Dirent, error) { + var ent Dirent + var res uintptr + // __readdir_r_a returns errno at the end of the directory stream, rather than 0. + // Therefore to avoid false positives we clear errno before calling it. + + // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" + //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. + + e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) + var err error + if e != 0 { + err = errnoErr(Errno(e)) + } + if res == 0 { + return nil, err + } + return &ent, err +} + +func Closedir(dir uintptr) error { + _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) + if e != 0 { + return errnoErr(e) + } + return nil +} + +func Seekdir(dir uintptr, pos int) { + _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) +} + +func Telldir(dir uintptr) (int, error) { + p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + pos := int(p) + if pos == -1 { + return pos, errnoErr(e) + } + return pos, nil +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + // struct flock is packed on z/OS. We can't emulate that in Go so + // instead we pack it here. 
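+	// Packed layout used below (24 bytes total):
+	//   Type   int16 at offset 0
+	//   Whence int16 at offset 2
+	//   Start  int64 at offset 4
+	//   Len    int64 at offset 12
+	//   Pid    int32 at offset 20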
+ var flock [24]byte + *(*int16)(unsafe.Pointer(&flock[0])) = lk.Type + *(*int16)(unsafe.Pointer(&flock[2])) = lk.Whence + *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start + *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len + *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid + _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) + lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) + lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) + lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) + lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) + if errno == 0 { + return nil + } + return errno +} + +func Flock(fd int, how int) error { + + var flock_type int16 + var fcntl_cmd int + + switch how { + case LOCK_SH | LOCK_NB: + flock_type = F_RDLCK + fcntl_cmd = F_SETLK + case LOCK_EX | LOCK_NB: + flock_type = F_WRLCK + fcntl_cmd = F_SETLK + case LOCK_EX: + flock_type = F_WRLCK + fcntl_cmd = F_SETLKW + case LOCK_UN: + flock_type = F_UNLCK + fcntl_cmd = F_SETLKW + default: + } + + flock := Flock_t{ + Type: int16(flock_type), + Whence: int16(0), + Start: int64(0), + Len: int64(0), + Pid: int32(Getppid()), + } + + err := FcntlFlock(uintptr(fd), fcntl_cmd, &flock) + return err +} + +func Mlock(b []byte) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Mlock2(b []byte, flags int) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Munlock(b []byte) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func ClockGettime(clockid int32, ts *Timespec) error { + + var ticks_per_sec uint32 = 100 //TODO(kenan): value is currently hardcoded; need sysconf() call otherwise + var nsec_per_sec int64 = 1000000000 + + if ts == nil { + return EFAULT + } + if clockid == CLOCK_REALTIME || clockid == CLOCK_MONOTONIC { + var nanotime int64 = runtime.Nanotime1() + ts.Sec = nanotime / nsec_per_sec + ts.Nsec = nanotime % nsec_per_sec + } else if clockid == CLOCK_PROCESS_CPUTIME_ID || clockid == CLOCK_THREAD_CPUTIME_ID { + var tm Tms + _, err := Times(&tm) + if err != nil { + return EFAULT + } + ts.Sec = int64(tm.Utime / ticks_per_sec) + ts.Nsec = int64(tm.Utime) * nsec_per_sec / int64(ticks_per_sec) + } else { + return EINVAL + } + return nil +} + +func Statfs(path string, stat *Statfs_t) (err error) { + fd, err := open(path, O_RDONLY, 0) + defer Close(fd) + if err != nil { + return err + } + return Fstatfs(fd, stat) +} + +var ( + Stdin = 0 + Stdout = 1 + Stderr = 2 +) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +var ( + signalNameMapOnce sync.Once + signalNameMap map[string]syscall.Signal +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
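+// For example, errnoErr(EAGAIN) always returns the shared errEAGAIN value, so
+// callers can compare returned errors with == without forcing a new allocation.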
+func errnoErr(e Errno) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + } + return e +} + +// ErrnoName returns the error name for error number e. +func ErrnoName(e Errno) string { + i := sort.Search(len(errorList), func(i int) bool { + return errorList[i].num >= e + }) + if i < len(errorList) && errorList[i].num == e { + return errorList[i].name + } + return "" +} + +// SignalName returns the signal name for signal number s. +func SignalName(s syscall.Signal) string { + i := sort.Search(len(signalList), func(i int) bool { + return signalList[i].num >= s + }) + if i < len(signalList) && signalList[i].num == s { + return signalList[i].name + } + return "" +} + +// SignalNum returns the syscall.Signal for signal named s, +// or 0 if a signal with such name is not found. +// The signal name should start with "SIG". +func SignalNum(s string) syscall.Signal { + signalNameMapOnce.Do(func() { + signalNameMap = make(map[string]syscall.Signal, len(signalList)) + for _, signal := range signalList { + signalNameMap[signal.name] = signal.num + } + }) + return signalNameMap[s] +} + +// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. +func clen(n []byte) int { + i := bytes.IndexByte(n, 0) + if i == -1 { + i = len(n) + } + return i +} + +// Mmap manager, for use by operating system-specific implementations. + +type mmapper struct { + sync.Mutex + active map[*byte][]byte // active mappings; key is last byte in mapping + mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error) + munmap func(addr uintptr, length uintptr) error +} + +func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + if length <= 0 { + return nil, EINVAL + } + + // Map the requested memory. + addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) + if errno != nil { + return nil, errno + } + + // Slice memory layout + var sl = struct { + addr uintptr + len int + cap int + }{addr, length, length} + + // Use unsafe to turn sl into a []byte. + b := *(*[]byte)(unsafe.Pointer(&sl)) + + // Register mapping in m and return it. + p := &b[cap(b)-1] + m.Lock() + defer m.Unlock() + m.active[p] = b + return b, nil +} + +func (m *mmapper) Munmap(data []byte) (err error) { + if len(data) == 0 || len(data) != cap(data) { + return EINVAL + } + + // Find the base of the mapping. + p := &data[cap(data)-1] + m.Lock() + defer m.Unlock() + b := m.active[p] + if b == nil || &b[0] != &data[0] { + return EINVAL + } + + // Unmap the memory and update m. + if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil { + return errno + } + delete(m.active, p) + return nil +} + +func Read(fd int, p []byte) (n int, err error) { + n, err = read(fd, p) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Write(fd int, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = write(fd, p) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +// Sockaddr represents a socket address. 
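+// Concrete implementations in this file are SockaddrInet4, SockaddrInet6 and
+// SockaddrUnix, defined below.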
+type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs +} + +// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets. +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets. +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets. +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func Bind(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getpeername(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getpeername(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(fd, &rsa) +} + +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func GetsockoptInt(fd, level, opt int) (value int, err error) { + var n int32 + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return int(n), err +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptLinger(fd, level, opt int) (*Linger, error) { + var linger Linger + vallen := _Socklen(SizeofLinger) + err := getsockopt(fd, level, opt, unsafe.Pointer(&linger), &vallen) + return &linger, err +} + +func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) { + var tv Timeval + vallen := _Socklen(unsafe.Sizeof(tv)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&tv), &vallen) + return &tv, err +} + +func GetsockoptUint64(fd, level, opt int) (value uint64, err error) { + var n uint64 + vallen := _Socklen(8) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil { + return + } + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +func Sendto(fd 
int, p []byte, flags int, to Sockaddr) (err error) { + ptr, n, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, flags, ptr, n) +} + +func SetsockoptByte(fd, level, opt int, value byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1) +} + +func SetsockoptInt(fd, level, opt int, value int) (err error) { + var n = int32(value) + return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4) +} + +func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4) +} + +func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq) +} + +func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq) +} + +func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error { + return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter) +} + +func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger) +} + +func SetsockoptString(fd, level, opt int, s string) (err error) { + var p unsafe.Pointer + if len(s) > 0 { + p = unsafe.Pointer(&[]byte(s)[0]) + } + return setsockopt(fd, level, opt, p, uintptr(len(s))) +} + +func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv)) +} + +func SetsockoptUint64(fd, level, opt int, value uint64) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 8) +} + +func Socket(domain, typ, proto int) (fd int, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return -1, EAFNOSUPPORT + } + fd, err = socket(domain, typ, proto) + return +} + +func Socketpair(domain, typ, proto int) (fd [2]int, err error) { + var fdx [2]int32 + err = socketpair(domain, typ, proto, &fdx) + if err == nil { + fd[0] = int(fdx[0]) + fd[1] = int(fdx[1]) + } + return +} + +var ioSync int64 + +func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) } + +func SetNonblock(fd int, nonblocking bool) (err error) { + flag, err := fcntl(fd, F_GETFL, 0) + if err != nil { + return err + } + if nonblocking { + flag |= O_NONBLOCK + } else { + flag &= ^O_NONBLOCK + } + _, err = fcntl(fd, F_SETFL, flag) + return err +} + +// Exec calls execve(2), which replaces the calling executable in the process +// tree. argv0 should be the full path to an executable ("/bin/ls") and the +// executable name should also be the first argument in argv (["ls", "-l"]). +// envv are the environment variables that should be passed to the new +// process (["USER=go", "PWD=/tmp"]). 
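+// Illustrative call (hypothetical path and environment):
+//
+//	Exec("/bin/ls", []string{"ls", "-l"}, []string{"USER=go", "PWD=/tmp"})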
+func Exec(argv0 string, argv []string, envv []string) error { + return syscall.Exec(argv0, argv, envv) +} + +func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + if needspace := 8 - len(fstype); needspace <= 0 { + fstype = fstype[:8] + } else { + fstype += " "[:needspace] + } + return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) +} + +func Unmount(name string, mtm int) (err error) { + // mountpoint is always a full path and starts with a '/' + // check if input string is not a mountpoint but a filesystem name + if name[0] != '/' { + return unmount(name, mtm) + } + // treat name as mountpoint + b2s := func(arr []byte) string { + nulli := bytes.IndexByte(arr, 0) + if nulli == -1 { + return string(arr) + } else { + return string(arr[:nulli]) + } + } + var buffer struct { + header W_Mnth + fsinfo [64]W_Mntent + } + fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err != nil { + return err + } + if fsCount == 0 { + return EINVAL + } + for i := 0; i < fsCount; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } + } + return err +} diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 103604299e..3d89304055 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 30c1d71f4e..25df1e3780 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || netbsd // +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 104994bc6a..ca9799b79e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -1,6 +1,7 @@ // mkerrors.sh -maix32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc && aix // +build ppc,aix // Created by cgo -godefs - DO NOT EDIT diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 4fc8d30649..200c8c26fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -maix64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && aix // +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go deleted file mode 100644 index ec376f51bc..0000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ /dev/null @@ -1,1788 +0,0 @@ -// mkerrors.sh -m32 -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build 386,darwin - -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -m32 _const.go - -package unix - -import "syscall" - -const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x28 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 
- B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4008426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80084267 - BIOCSETFNR = 0x8008427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8008426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - 
DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - 
F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - 
IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - 
IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - 
MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 
0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc01c697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc0086924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc0286938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc00c6981 - SIOCRSLVMULTI = 0xc008693b - SIOCSDRVSPEC = 0x801c697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 
- SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40087458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x402c7413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x802c7414 - TIOCSETAF = 0x802c7416 - TIOCSETAW = 0x802c7415 - TIOCSETD = 0x8004741b - TIOCSIG 
= 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40087459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x20 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x30) - EADDRNOTAVAIL = syscall.Errno(0x31) - EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0x23) - EALREADY = syscall.Errno(0x25) - EAUTH = syscall.Errno(0x50) - EBADARCH = syscall.Errno(0x56) - EBADEXEC = syscall.Errno(0x55) - EBADF = syscall.Errno(0x9) - EBADMACHO = syscall.Errno(0x58) - EBADMSG = syscall.Errno(0x5e) - EBADRPC = syscall.Errno(0x48) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x59) - ECHILD = syscall.Errno(0xa) - ECONNABORTED = syscall.Errno(0x35) - ECONNREFUSED = syscall.Errno(0x3d) - ECONNRESET = syscall.Errno(0x36) - EDEADLK = syscall.Errno(0xb) - EDESTADDRREQ = syscall.Errno(0x27) - EDEVERR = syscall.Errno(0x53) - EDOM = syscall.Errno(0x21) - EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EFTYPE = syscall.Errno(0x4f) - EHOSTDOWN = syscall.Errno(0x40) - EHOSTUNREACH = syscall.Errno(0x41) - EIDRM = syscall.Errno(0x5a) - EILSEQ = syscall.Errno(0x5c) - EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x6a) - ELOOP = syscall.Errno(0x3e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x28) - EMULTIHOP = syscall.Errno(0x5f) - ENAMETOOLONG = syscall.Errno(0x3f) - ENEEDAUTH = syscall.Errno(0x51) - ENETDOWN = syscall.Errno(0x32) - ENETRESET = syscall.Errno(0x34) - ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) - ENOATTR = syscall.Errno(0x5d) - ENOBUFS = syscall.Errno(0x37) - ENODATA = syscall.Errno(0x60) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOLCK = syscall.Errno(0x4d) - ENOLINK = syscall.Errno(0x61) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x5b) - ENOPOLICY = syscall.Errno(0x67) - ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x62) - ENOSTR = syscall.Errno(0x63) - ENOSYS = syscall.Errno(0x4e) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x42) - ENOTRECOVERABLE = syscall.Errno(0x68) - ENOTSOCK = syscall.Errno(0x26) - ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x66) - EOVERFLOW = syscall.Errno(0x54) - EOWNERDEAD = syscall.Errno(0x69) - EPERM = syscall.Errno(0x1) - 
EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) - EPROCLIM = syscall.Errno(0x43) - EPROCUNAVAIL = syscall.Errno(0x4c) - EPROGMISMATCH = syscall.Errno(0x4b) - EPROGUNAVAIL = syscall.Errno(0x4a) - EPROTO = syscall.Errno(0x64) - EPROTONOSUPPORT = syscall.Errno(0x2b) - EPROTOTYPE = syscall.Errno(0x29) - EPWROFF = syscall.Errno(0x52) - EQFULL = syscall.Errno(0x6a) - ERANGE = syscall.Errno(0x22) - EREMOTE = syscall.Errno(0x47) - EROFS = syscall.Errno(0x1e) - ERPCMISMATCH = syscall.Errno(0x49) - ESHLIBVERS = syscall.Errno(0x57) - ESHUTDOWN = syscall.Errno(0x3a) - ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESTALE = syscall.Errno(0x46) - ETIME = syscall.Errno(0x65) - ETIMEDOUT = syscall.Errno(0x3c) - ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) - EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0x23) - EXDEV = syscall.Errno(0x12) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x14) - SIGCONT = syscall.Signal(0x13) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINFO = syscall.Signal(0x1d) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPROF = syscall.Signal(0x1b) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x11) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x12) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGURG = syscall.Signal(0x10) - SIGUSR1 = syscall.Signal(0x1e) - SIGUSR2 = syscall.Signal(0x1f) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errorList = [...]struct { - num syscall.Errno - name string - desc string -}{ - {1, "EPERM", "operation not permitted"}, - {2, "ENOENT", "no such file or directory"}, - {3, "ESRCH", "no such process"}, - {4, "EINTR", "interrupted system call"}, - {5, "EIO", "input/output error"}, - {6, "ENXIO", "device not configured"}, - {7, "E2BIG", "argument list too long"}, - {8, "ENOEXEC", "exec format error"}, - {9, "EBADF", "bad file descriptor"}, - {10, "ECHILD", "no child processes"}, - {11, "EDEADLK", "resource deadlock avoided"}, - {12, "ENOMEM", "cannot allocate memory"}, - {13, "EACCES", "permission denied"}, - {14, "EFAULT", "bad address"}, - {15, "ENOTBLK", "block device required"}, - {16, "EBUSY", "resource busy"}, - {17, "EEXIST", "file exists"}, - {18, "EXDEV", "cross-device link"}, - {19, "ENODEV", "operation not supported by device"}, - {20, "ENOTDIR", "not a directory"}, - {21, "EISDIR", "is a directory"}, - {22, "EINVAL", "invalid argument"}, - {23, "ENFILE", "too many open files in system"}, - {24, "EMFILE", "too many open files"}, - {25, "ENOTTY", "inappropriate ioctl for device"}, - {26, "ETXTBSY", "text file busy"}, - {27, "EFBIG", "file too large"}, - {28, "ENOSPC", "no space left on device"}, - {29, "ESPIPE", "illegal seek"}, - {30, "EROFS", "read-only file system"}, - {31, "EMLINK", "too many links"}, - {32, "EPIPE", "broken pipe"}, - {33, "EDOM", "numerical argument out of domain"}, - {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, - {36, 
"EINPROGRESS", "operation now in progress"}, - {37, "EALREADY", "operation already in progress"}, - {38, "ENOTSOCK", "socket operation on non-socket"}, - {39, "EDESTADDRREQ", "destination address required"}, - {40, "EMSGSIZE", "message too long"}, - {41, "EPROTOTYPE", "protocol wrong type for socket"}, - {42, "ENOPROTOOPT", "protocol not available"}, - {43, "EPROTONOSUPPORT", "protocol not supported"}, - {44, "ESOCKTNOSUPPORT", "socket type not supported"}, - {45, "ENOTSUP", "operation not supported"}, - {46, "EPFNOSUPPORT", "protocol family not supported"}, - {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, - {48, "EADDRINUSE", "address already in use"}, - {49, "EADDRNOTAVAIL", "can't assign requested address"}, - {50, "ENETDOWN", "network is down"}, - {51, "ENETUNREACH", "network is unreachable"}, - {52, "ENETRESET", "network dropped connection on reset"}, - {53, "ECONNABORTED", "software caused connection abort"}, - {54, "ECONNRESET", "connection reset by peer"}, - {55, "ENOBUFS", "no buffer space available"}, - {56, "EISCONN", "socket is already connected"}, - {57, "ENOTCONN", "socket is not connected"}, - {58, "ESHUTDOWN", "can't send after socket shutdown"}, - {59, "ETOOMANYREFS", "too many references: can't splice"}, - {60, "ETIMEDOUT", "operation timed out"}, - {61, "ECONNREFUSED", "connection refused"}, - {62, "ELOOP", "too many levels of symbolic links"}, - {63, "ENAMETOOLONG", "file name too long"}, - {64, "EHOSTDOWN", "host is down"}, - {65, "EHOSTUNREACH", "no route to host"}, - {66, "ENOTEMPTY", "directory not empty"}, - {67, "EPROCLIM", "too many processes"}, - {68, "EUSERS", "too many users"}, - {69, "EDQUOT", "disc quota exceeded"}, - {70, "ESTALE", "stale NFS file handle"}, - {71, "EREMOTE", "too many levels of remote in path"}, - {72, "EBADRPC", "RPC struct is bad"}, - {73, "ERPCMISMATCH", "RPC version wrong"}, - {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, - {75, "EPROGMISMATCH", "program version wrong"}, - {76, "EPROCUNAVAIL", "bad procedure for program"}, - {77, "ENOLCK", "no locks available"}, - {78, "ENOSYS", "function not implemented"}, - {79, "EFTYPE", "inappropriate file type or format"}, - {80, "EAUTH", "authentication error"}, - {81, "ENEEDAUTH", "need authenticator"}, - {82, "EPWROFF", "device power is off"}, - {83, "EDEVERR", "device error"}, - {84, "EOVERFLOW", "value too large to be stored in data type"}, - {85, "EBADEXEC", "bad executable (or shared library)"}, - {86, "EBADARCH", "bad CPU type in executable"}, - {87, "ESHLIBVERS", "shared library version mismatch"}, - {88, "EBADMACHO", "malformed Mach-o file"}, - {89, "ECANCELED", "operation canceled"}, - {90, "EIDRM", "identifier removed"}, - {91, "ENOMSG", "no message of desired type"}, - {92, "EILSEQ", "illegal byte sequence"}, - {93, "ENOATTR", "attribute not found"}, - {94, "EBADMSG", "bad message"}, - {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, - {96, "ENODATA", "no message available on STREAM"}, - {97, "ENOLINK", "ENOLINK (Reserved)"}, - {98, "ENOSR", "no STREAM resources"}, - {99, "ENOSTR", "not a STREAM"}, - {100, "EPROTO", "protocol error"}, - {101, "ETIME", "STREAM ioctl timeout"}, - {102, "EOPNOTSUPP", "operation not supported on socket"}, - {103, "ENOPOLICY", "policy not found"}, - {104, "ENOTRECOVERABLE", "state not recoverable"}, - {105, "EOWNERDEAD", "previous owner died"}, - {106, "EQFULL", "interface output queue is full"}, -} - -// Signal table -var signalList = [...]struct { - num syscall.Signal - name string - desc string -}{ - {1, "SIGHUP", "hangup"}, - {2, "SIGINT", "interrupt"}, - {3, "SIGQUIT", "quit"}, - {4, "SIGILL", "illegal instruction"}, - {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, - {7, "SIGEMT", "EMT trap"}, - {8, "SIGFPE", "floating point exception"}, - {9, "SIGKILL", "killed"}, - {10, "SIGBUS", "bus error"}, - {11, "SIGSEGV", "segmentation fault"}, - {12, "SIGSYS", "bad system call"}, - {13, "SIGPIPE", "broken pipe"}, - {14, "SIGALRM", "alarm clock"}, - {15, "SIGTERM", "terminated"}, - {16, "SIGURG", "urgent I/O condition"}, - {17, "SIGSTOP", "suspended (signal)"}, - {18, "SIGTSTP", "suspended"}, - {19, "SIGCONT", "continued"}, - {20, "SIGCHLD", "child exited"}, - {21, "SIGTTIN", "stopped (tty input)"}, - {22, "SIGTTOU", "stopped (tty output)"}, - {23, "SIGIO", "I/O possible"}, - {24, "SIGXCPU", "cputime limit exceeded"}, - {25, "SIGXFSZ", "filesize limit exceeded"}, - {26, "SIGVTALRM", "virtual timer expired"}, - {27, "SIGPROF", "profiling timer expired"}, - {28, "SIGWINCH", "window size changes"}, - {29, "SIGINFO", "information request"}, - {30, "SIGUSR1", "user defined signal 1"}, - {31, "SIGUSR2", "user defined signal 2"}, -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index fea5dfaadb..991996b609 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && darwin // +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -32,7 +33,7 @@ const ( AF_LAT = 0xe AF_LINK = 0x12 AF_LOCAL = 0x1 - AF_MAX = 0x28 + AF_MAX = 0x29 AF_NATM = 0x1f AF_NDRV = 0x1b AF_NETBIOS = 0x21 @@ -49,6 +50,7 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 + AF_VSOCK = 0x28 ALTWERASE = 0x200 ATTR_BIT_MAP_COUNT = 0x5 ATTR_CMN_ACCESSMASK = 0x20000 @@ -83,7 +85,7 @@ const ( ATTR_CMN_PAROBJID = 0x80 ATTR_CMN_RETURNED_ATTRS = 0x80000000 ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_SETMASK = 0x51c7ff00 ATTR_CMN_USERACCESS = 0x200000 ATTR_CMN_UUID = 0x800000 ATTR_CMN_VALIDMASK = 0xffffffff @@ -357,7 +359,7 @@ const ( DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MAX = 0x10a DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -398,6 +400,7 @@ const ( DLT_SYMANTEC_FIREWALL = 0x63 DLT_TZSP = 0x80 DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -442,8 +445,8 @@ const ( EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -481,9 +484,12 @@ const ( FSOPT_NOINMEMUPDATE = 0x2 FSOPT_PACK_INVAL_ATTRS = 0x8 FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 F_ADDFILESIGS = 0x3d F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 @@ -505,8 +511,10 @@ const ( F_GETOWN = 0x5 F_GETPATH = 0x32 F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 F_GETPROTECTIONCLASS = 0x3f F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 F_GLOBAL_NOCACHE = 0x37 F_LOG2PHYS = 0x31 F_LOG2PHYS_EXT = 0x41 @@ -531,6 +539,7 @@ const ( F_SETPROTECTIONCLASS = 0x40 F_SETSIZE = 0x2b F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 F_THAW_FS = 0x36 F_TRANSCODEKEY = 0x4b F_TRIM_ACTIVE_FILE = 0x64 @@ -562,6 +571,7 @@ const ( IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 IFT_AAL5 = 0x31 IFT_ARCNET = 0x23 IFT_ARCNETPLUS = 0x24 @@ -766,16 +776,28 @@ const ( IPV6_2292PKTINFO = 0x13 IPV6_2292PKTOPTIONS = 0x19 IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b IPV6_BINDV6ONLY = 0x1b IPV6_BOUND_IF = 0x7d IPV6_CHECKSUM = 0x1a IPV6_DEFAULT_MULTICAST_HOPS = 0x1 IPV6_DEFAULT_MULTICAST_LOOP = 0x1 IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FLOW_ECN_MASK = 0x3000 IPV6_FRAGTTL = 0x3c IPV6_FW_ADD = 0x1e IPV6_FW_DEL = 0x1f @@ -783,6 +805,8 @@ const ( IPV6_FW_GET = 0x22 IPV6_FW_ZERO = 0x21 IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd @@ -794,20 +818,34 @@ const ( IPV6_MAX_SOCK_SRC_FILTER = 0x80 IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe IPV6_PORTRANGE_DEFAULT = 0x0 IPV6_PORTRANGE_HIGH = 0x1 IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 
0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 IPV6_RTHDR_LOOSE = 0x0 IPV6_RTHDR_STRICT = 0x1 IPV6_RTHDR_TYPE_0 = 0x0 IPV6_SOCKOPT_RESERVED1 = 0x3 IPV6_TCLASS = 0x24 IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -818,6 +856,7 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 + IP_DONTFRAG = 0x1c IP_DROP_MEMBERSHIP = 0xd IP_DROP_SOURCE_MEMBERSHIP = 0x47 IP_DUMMYNET_CONFIGURE = 0x3c @@ -889,6 +928,12 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -904,6 +949,7 @@ const ( MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 @@ -920,6 +966,17 @@ const ( MAP_RESILIENT_CODESIGN = 0x2000 MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -931,6 +988,7 @@ const ( MNT_DOVOLFS = 0x8000 MNT_DWAIT = 0x4 MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 MNT_FORCE = 0x80000 MNT_IGNORE_OWNERSHIP = 0x200000 MNT_JOURNALED = 0x800000 @@ -947,12 +1005,15 @@ const ( MNT_QUOTA = 0x2000 MNT_RDONLY = 0x1 MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 MNT_UNKNOWNPERMISSIONS = 0x200000 MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff + MNT_VISFLAGMASK = 0xd7f0f7ff MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 @@ -963,6 +1024,7 @@ const ( MSG_HAVEMORE = 0x2000 MSG_HOLD = 0x800 MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 MSG_OOB = 0x1 MSG_PEEK = 0x2 MSG_RCVMORE = 0x4000 @@ -979,9 +1041,10 @@ const ( NET_RT_DUMP = 0x1 NET_RT_DUMP2 = 0x7 NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa NET_RT_IFLIST = 0x3 NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa + NET_RT_MAXID = 0xb NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 NFDBITS = 0x20 @@ -1019,6 +1082,7 @@ const ( NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 @@ -1065,6 +1129,7 @@ const ( O_NDELAY = 0x4 O_NOCTTY = 0x20000 O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 O_NONBLOCK = 0x4 O_POPUP = 0x80000000 O_RDONLY = 0x0 @@ -1136,6 +1201,7 @@ const ( RTF_BROADCAST = 0x400000 RTF_CLONING = 0x100 RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 RTF_DELCLONE = 0x80 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 @@ -1143,6 +1209,7 @@ const ( RTF_HOST = 0x4 RTF_IFREF = 0x4000000 RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 @@ -1210,6 +1277,7 @@ const ( SIOCGDRVSPEC = 0xc028697b SIOCGETVLAN = 0xc020697f SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 SIOCGIFADDR = 0xc0206921 SIOCGIFALTMTU = 0xc0206948 SIOCGIFASYNCMAP = 0xc020697c @@ -1220,6 +1288,7 @@ const ( SIOCGIFDEVMTU = 0xc0206944 SIOCGIFDSTADDR = 
0xc0206922 SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad SIOCGIFGENERIC = 0xc020693a SIOCGIFKPI = 0xc0206987 SIOCGIFMAC = 0xc0206982 @@ -1233,6 +1302,7 @@ const ( SIOCGIFSTATUS = 0xc331693d SIOCGIFVLAN = 0xc020697f SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCIFCREATE = 0xc0206978 @@ -1243,6 +1313,7 @@ const ( SIOCSDRVSPEC = 0x8028697b SIOCSETVLAN = 0x8020697e SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 SIOCSIFADDR = 0x8020690c SIOCSIFALTMTU = 0x80206945 SIOCSIFASYNCMAP = 0x8020697d @@ -1270,6 +1341,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go deleted file mode 100644 index 03feefbf8c..0000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ /dev/null @@ -1,1788 +0,0 @@ -// mkerrors.sh -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build arm,darwin - -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- _const.go - -package unix - -import "syscall" - -const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x28 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - 
ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - 
DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 
0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - 
IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 
0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - 
MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 
0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc028697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc00c6924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc02c6938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 
0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106981 - SIOCRSLVMULTI = 0xc010693b - SIOCSDRVSPEC = 0x8028697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40107458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x40487413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 
0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x80487414 - TIOCSETAF = 0x80487416 - TIOCSETAW = 0x80487415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x40 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x30) - EADDRNOTAVAIL = syscall.Errno(0x31) - EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0x23) - EALREADY = syscall.Errno(0x25) - EAUTH = syscall.Errno(0x50) - EBADARCH = syscall.Errno(0x56) - EBADEXEC = syscall.Errno(0x55) - EBADF = syscall.Errno(0x9) - EBADMACHO = syscall.Errno(0x58) - EBADMSG = syscall.Errno(0x5e) - EBADRPC = syscall.Errno(0x48) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x59) - ECHILD = syscall.Errno(0xa) - ECONNABORTED = syscall.Errno(0x35) - ECONNREFUSED = syscall.Errno(0x3d) - ECONNRESET = syscall.Errno(0x36) - EDEADLK = syscall.Errno(0xb) - EDESTADDRREQ = syscall.Errno(0x27) - EDEVERR = syscall.Errno(0x53) - EDOM = syscall.Errno(0x21) - EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EFTYPE = syscall.Errno(0x4f) - EHOSTDOWN = syscall.Errno(0x40) - EHOSTUNREACH = syscall.Errno(0x41) - EIDRM = syscall.Errno(0x5a) - EILSEQ = syscall.Errno(0x5c) - EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x6a) - ELOOP = syscall.Errno(0x3e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x28) - EMULTIHOP = syscall.Errno(0x5f) - ENAMETOOLONG = syscall.Errno(0x3f) - ENEEDAUTH = 
syscall.Errno(0x51) - ENETDOWN = syscall.Errno(0x32) - ENETRESET = syscall.Errno(0x34) - ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) - ENOATTR = syscall.Errno(0x5d) - ENOBUFS = syscall.Errno(0x37) - ENODATA = syscall.Errno(0x60) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOLCK = syscall.Errno(0x4d) - ENOLINK = syscall.Errno(0x61) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x5b) - ENOPOLICY = syscall.Errno(0x67) - ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x62) - ENOSTR = syscall.Errno(0x63) - ENOSYS = syscall.Errno(0x4e) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x42) - ENOTRECOVERABLE = syscall.Errno(0x68) - ENOTSOCK = syscall.Errno(0x26) - ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x66) - EOVERFLOW = syscall.Errno(0x54) - EOWNERDEAD = syscall.Errno(0x69) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) - EPROCLIM = syscall.Errno(0x43) - EPROCUNAVAIL = syscall.Errno(0x4c) - EPROGMISMATCH = syscall.Errno(0x4b) - EPROGUNAVAIL = syscall.Errno(0x4a) - EPROTO = syscall.Errno(0x64) - EPROTONOSUPPORT = syscall.Errno(0x2b) - EPROTOTYPE = syscall.Errno(0x29) - EPWROFF = syscall.Errno(0x52) - EQFULL = syscall.Errno(0x6a) - ERANGE = syscall.Errno(0x22) - EREMOTE = syscall.Errno(0x47) - EROFS = syscall.Errno(0x1e) - ERPCMISMATCH = syscall.Errno(0x49) - ESHLIBVERS = syscall.Errno(0x57) - ESHUTDOWN = syscall.Errno(0x3a) - ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESTALE = syscall.Errno(0x46) - ETIME = syscall.Errno(0x65) - ETIMEDOUT = syscall.Errno(0x3c) - ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) - EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0x23) - EXDEV = syscall.Errno(0x12) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x14) - SIGCONT = syscall.Signal(0x13) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINFO = syscall.Signal(0x1d) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPROF = syscall.Signal(0x1b) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x11) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x12) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGURG = syscall.Signal(0x10) - SIGUSR1 = syscall.Signal(0x1e) - SIGUSR2 = syscall.Signal(0x1f) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errorList = [...]struct { - num syscall.Errno - name string - desc string -}{ - {1, "EPERM", "operation not permitted"}, - {2, "ENOENT", "no such file or directory"}, - {3, "ESRCH", "no such process"}, - {4, "EINTR", "interrupted system call"}, - {5, "EIO", "input/output error"}, - {6, "ENXIO", "device not configured"}, - {7, "E2BIG", "argument list too long"}, - {8, "ENOEXEC", "exec format error"}, - {9, "EBADF", "bad file descriptor"}, 
- {10, "ECHILD", "no child processes"}, - {11, "EDEADLK", "resource deadlock avoided"}, - {12, "ENOMEM", "cannot allocate memory"}, - {13, "EACCES", "permission denied"}, - {14, "EFAULT", "bad address"}, - {15, "ENOTBLK", "block device required"}, - {16, "EBUSY", "resource busy"}, - {17, "EEXIST", "file exists"}, - {18, "EXDEV", "cross-device link"}, - {19, "ENODEV", "operation not supported by device"}, - {20, "ENOTDIR", "not a directory"}, - {21, "EISDIR", "is a directory"}, - {22, "EINVAL", "invalid argument"}, - {23, "ENFILE", "too many open files in system"}, - {24, "EMFILE", "too many open files"}, - {25, "ENOTTY", "inappropriate ioctl for device"}, - {26, "ETXTBSY", "text file busy"}, - {27, "EFBIG", "file too large"}, - {28, "ENOSPC", "no space left on device"}, - {29, "ESPIPE", "illegal seek"}, - {30, "EROFS", "read-only file system"}, - {31, "EMLINK", "too many links"}, - {32, "EPIPE", "broken pipe"}, - {33, "EDOM", "numerical argument out of domain"}, - {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, - {36, "EINPROGRESS", "operation now in progress"}, - {37, "EALREADY", "operation already in progress"}, - {38, "ENOTSOCK", "socket operation on non-socket"}, - {39, "EDESTADDRREQ", "destination address required"}, - {40, "EMSGSIZE", "message too long"}, - {41, "EPROTOTYPE", "protocol wrong type for socket"}, - {42, "ENOPROTOOPT", "protocol not available"}, - {43, "EPROTONOSUPPORT", "protocol not supported"}, - {44, "ESOCKTNOSUPPORT", "socket type not supported"}, - {45, "ENOTSUP", "operation not supported"}, - {46, "EPFNOSUPPORT", "protocol family not supported"}, - {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, - {48, "EADDRINUSE", "address already in use"}, - {49, "EADDRNOTAVAIL", "can't assign requested address"}, - {50, "ENETDOWN", "network is down"}, - {51, "ENETUNREACH", "network is unreachable"}, - {52, "ENETRESET", "network dropped connection on reset"}, - {53, "ECONNABORTED", "software caused connection abort"}, - {54, "ECONNRESET", "connection reset by peer"}, - {55, "ENOBUFS", "no buffer space available"}, - {56, "EISCONN", "socket is already connected"}, - {57, "ENOTCONN", "socket is not connected"}, - {58, "ESHUTDOWN", "can't send after socket shutdown"}, - {59, "ETOOMANYREFS", "too many references: can't splice"}, - {60, "ETIMEDOUT", "operation timed out"}, - {61, "ECONNREFUSED", "connection refused"}, - {62, "ELOOP", "too many levels of symbolic links"}, - {63, "ENAMETOOLONG", "file name too long"}, - {64, "EHOSTDOWN", "host is down"}, - {65, "EHOSTUNREACH", "no route to host"}, - {66, "ENOTEMPTY", "directory not empty"}, - {67, "EPROCLIM", "too many processes"}, - {68, "EUSERS", "too many users"}, - {69, "EDQUOT", "disc quota exceeded"}, - {70, "ESTALE", "stale NFS file handle"}, - {71, "EREMOTE", "too many levels of remote in path"}, - {72, "EBADRPC", "RPC struct is bad"}, - {73, "ERPCMISMATCH", "RPC version wrong"}, - {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, - {75, "EPROGMISMATCH", "program version wrong"}, - {76, "EPROCUNAVAIL", "bad procedure for program"}, - {77, "ENOLCK", "no locks available"}, - {78, "ENOSYS", "function not implemented"}, - {79, "EFTYPE", "inappropriate file type or format"}, - {80, "EAUTH", "authentication error"}, - {81, "ENEEDAUTH", "need authenticator"}, - {82, "EPWROFF", "device power is off"}, - {83, "EDEVERR", "device error"}, - {84, "EOVERFLOW", "value too large to be stored in data type"}, - {85, "EBADEXEC", "bad executable (or shared library)"}, - {86, "EBADARCH", "bad CPU type in executable"}, - {87, "ESHLIBVERS", "shared library version mismatch"}, - {88, "EBADMACHO", "malformed Mach-o file"}, - {89, "ECANCELED", "operation canceled"}, - {90, "EIDRM", "identifier removed"}, - {91, "ENOMSG", "no message of desired type"}, - {92, "EILSEQ", "illegal byte sequence"}, - {93, "ENOATTR", "attribute not found"}, - {94, "EBADMSG", "bad message"}, - {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, - {96, "ENODATA", "no message available on STREAM"}, - {97, "ENOLINK", "ENOLINK (Reserved)"}, - {98, "ENOSR", "no STREAM resources"}, - {99, "ENOSTR", "not a STREAM"}, - {100, "EPROTO", "protocol error"}, - {101, "ETIME", "STREAM ioctl timeout"}, - {102, "EOPNOTSUPP", "operation not supported on socket"}, - {103, "ENOPOLICY", "policy not found"}, - {104, "ENOTRECOVERABLE", "state not recoverable"}, - {105, "EOWNERDEAD", "previous owner died"}, - {106, "EQFULL", "interface output queue is full"}, -} - -// Signal table -var signalList = [...]struct { - num syscall.Signal - name string - desc string -}{ - {1, "SIGHUP", "hangup"}, - {2, "SIGINT", "interrupt"}, - {3, "SIGQUIT", "quit"}, - {4, "SIGILL", "illegal instruction"}, - {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, - {7, "SIGEMT", "EMT trap"}, - {8, "SIGFPE", "floating point exception"}, - {9, "SIGKILL", "killed"}, - {10, "SIGBUS", "bus error"}, - {11, "SIGSEGV", "segmentation fault"}, - {12, "SIGSYS", "bad system call"}, - {13, "SIGPIPE", "broken pipe"}, - {14, "SIGALRM", "alarm clock"}, - {15, "SIGTERM", "terminated"}, - {16, "SIGURG", "urgent I/O condition"}, - {17, "SIGSTOP", "suspended (signal)"}, - {18, "SIGTSTP", "suspended"}, - {19, "SIGCONT", "continued"}, - {20, "SIGCHLD", "child exited"}, - {21, "SIGTTIN", "stopped (tty input)"}, - {22, "SIGTTOU", "stopped (tty output)"}, - {23, "SIGIO", "I/O possible"}, - {24, "SIGXCPU", "cputime limit exceeded"}, - {25, "SIGXFSZ", "filesize limit exceeded"}, - {26, "SIGVTALRM", "virtual timer expired"}, - {27, "SIGPROF", "profiling timer expired"}, - {28, "SIGWINCH", "window size changes"}, - {29, "SIGINFO", "information request"}, - {30, "SIGUSR1", "user defined signal 1"}, - {31, "SIGUSR2", "user defined signal 2"}, -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index b40fb1f696..e644eaf5e7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -32,7 +33,7 @@ const ( AF_LAT = 0xe AF_LINK = 0x12 AF_LOCAL = 0x1 - AF_MAX = 0x28 + AF_MAX = 0x29 AF_NATM = 0x1f AF_NDRV = 0x1b AF_NETBIOS = 0x21 @@ -49,6 +50,7 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 + AF_VSOCK = 0x28 ALTWERASE = 0x200 ATTR_BIT_MAP_COUNT = 0x5 ATTR_CMN_ACCESSMASK = 0x20000 @@ -83,7 +85,7 @@ const ( ATTR_CMN_PAROBJID = 0x80 ATTR_CMN_RETURNED_ATTRS = 0x80000000 ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_SETMASK = 0x51c7ff00 ATTR_CMN_USERACCESS = 0x200000 ATTR_CMN_UUID = 0x800000 ATTR_CMN_VALIDMASK = 0xffffffff @@ -357,7 +359,7 @@ const ( DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MAX = 0x10a DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -398,6 +400,7 @@ const ( DLT_SYMANTEC_FIREWALL = 0x63 DLT_TZSP = 0x80 DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -442,8 +445,8 @@ const ( EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -481,9 +484,12 @@ const ( FSOPT_NOINMEMUPDATE = 0x2 FSOPT_PACK_INVAL_ATTRS = 0x8 FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 F_ADDFILESIGS = 0x3d F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 @@ -505,8 +511,10 @@ const ( F_GETOWN = 0x5 F_GETPATH = 0x32 F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 F_GETPROTECTIONCLASS = 0x3f F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 F_GLOBAL_NOCACHE = 0x37 F_LOG2PHYS = 0x31 F_LOG2PHYS_EXT = 0x41 @@ -531,6 +539,7 @@ const ( F_SETPROTECTIONCLASS = 0x40 F_SETSIZE = 0x2b F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 F_THAW_FS = 0x36 F_TRANSCODEKEY = 0x4b F_TRIM_ACTIVE_FILE = 0x64 @@ -562,6 +571,7 @@ const ( IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 IFT_AAL5 = 0x31 IFT_ARCNET = 0x23 IFT_ARCNETPLUS = 0x24 @@ -766,16 +776,28 @@ const ( IPV6_2292PKTINFO = 0x13 IPV6_2292PKTOPTIONS = 0x19 IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b IPV6_BINDV6ONLY = 0x1b IPV6_BOUND_IF = 0x7d IPV6_CHECKSUM = 0x1a IPV6_DEFAULT_MULTICAST_HOPS = 0x1 IPV6_DEFAULT_MULTICAST_LOOP = 0x1 IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FLOW_ECN_MASK = 0x3000 IPV6_FRAGTTL = 0x3c IPV6_FW_ADD = 0x1e IPV6_FW_DEL = 0x1f @@ -783,6 +805,8 @@ const ( IPV6_FW_GET = 0x22 IPV6_FW_ZERO = 0x21 IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd @@ -794,20 +818,34 @@ const ( IPV6_MAX_SOCK_SRC_FILTER = 0x80 IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe IPV6_PORTRANGE_DEFAULT = 0x0 IPV6_PORTRANGE_HIGH = 0x1 IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 
0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 IPV6_RTHDR_LOOSE = 0x0 IPV6_RTHDR_STRICT = 0x1 IPV6_RTHDR_TYPE_0 = 0x0 IPV6_SOCKOPT_RESERVED1 = 0x3 IPV6_TCLASS = 0x24 IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -818,6 +856,7 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 + IP_DONTFRAG = 0x1c IP_DROP_MEMBERSHIP = 0xd IP_DROP_SOURCE_MEMBERSHIP = 0x47 IP_DUMMYNET_CONFIGURE = 0x3c @@ -889,6 +928,12 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -904,6 +949,7 @@ const ( MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 @@ -920,6 +966,17 @@ const ( MAP_RESILIENT_CODESIGN = 0x2000 MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -931,6 +988,7 @@ const ( MNT_DOVOLFS = 0x8000 MNT_DWAIT = 0x4 MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 MNT_FORCE = 0x80000 MNT_IGNORE_OWNERSHIP = 0x200000 MNT_JOURNALED = 0x800000 @@ -947,12 +1005,15 @@ const ( MNT_QUOTA = 0x2000 MNT_RDONLY = 0x1 MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 MNT_UNKNOWNPERMISSIONS = 0x200000 MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff + MNT_VISFLAGMASK = 0xd7f0f7ff MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 @@ -963,6 +1024,7 @@ const ( MSG_HAVEMORE = 0x2000 MSG_HOLD = 0x800 MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 MSG_OOB = 0x1 MSG_PEEK = 0x2 MSG_RCVMORE = 0x4000 @@ -979,9 +1041,10 @@ const ( NET_RT_DUMP = 0x1 NET_RT_DUMP2 = 0x7 NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa NET_RT_IFLIST = 0x3 NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa + NET_RT_MAXID = 0xb NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 NFDBITS = 0x20 @@ -1019,6 +1082,7 @@ const ( NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 @@ -1065,6 +1129,7 @@ const ( O_NDELAY = 0x4 O_NOCTTY = 0x20000 O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 O_NONBLOCK = 0x4 O_POPUP = 0x80000000 O_RDONLY = 0x0 @@ -1136,6 +1201,7 @@ const ( RTF_BROADCAST = 0x400000 RTF_CLONING = 0x100 RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 RTF_DELCLONE = 0x80 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 @@ -1143,6 +1209,7 @@ const ( RTF_HOST = 0x4 RTF_IFREF = 0x4000000 RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 @@ -1210,6 +1277,7 @@ const ( SIOCGDRVSPEC = 0xc028697b SIOCGETVLAN = 0xc020697f SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 SIOCGIFADDR = 0xc0206921 SIOCGIFALTMTU = 0xc0206948 SIOCGIFASYNCMAP = 0xc020697c @@ -1220,6 +1288,7 @@ const ( SIOCGIFDEVMTU = 0xc0206944 SIOCGIFDSTADDR = 
0xc0206922 SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad SIOCGIFGENERIC = 0xc020693a SIOCGIFKPI = 0xc0206987 SIOCGIFMAC = 0xc0206982 @@ -1233,6 +1302,7 @@ const ( SIOCGIFSTATUS = 0xc331693d SIOCGIFVLAN = 0xc020697f SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCIFCREATE = 0xc0206978 @@ -1243,6 +1313,7 @@ const ( SIOCSDRVSPEC = 0x8028697b SIOCSETVLAN = 0x8020697e SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 SIOCSIFADDR = 0x8020690c SIOCSIFALTMTU = 0x80206945 SIOCSIFASYNCMAP = 0x8020697d @@ -1270,6 +1341,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index f5e91b7aba..17bba0e44f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 3689c80848..9c7c5e1654 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1375,6 +1381,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b8f7c3c930..b265abb25f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && freebsd // +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1376,6 +1382,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index be14bb1a4c..3df99f285f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -980,6 +981,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1016,6 +1022,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1341,6 +1356,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 7ce9c0081a..218d39906d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && freebsd // +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1376,6 +1382,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b3463a8b5a..4e4583b615 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -165,13 +166,16 @@ const ( BPF_ALU64 = 0x7 BPF_AND = 0x50 BPF_ARSH = 0xc0 + BPF_ATOMIC = 0xc0 BPF_B = 0x10 BPF_BUILD_ID_SIZE = 0x14 BPF_CALL = 0x80 + BPF_CMPXCHG = 0xf1 BPF_DIV = 0x30 BPF_DW = 0x18 BPF_END = 0xd0 BPF_EXIT = 0x90 + BPF_FETCH = 0x1 BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 @@ -239,11 +243,16 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BPF_XADD = 0xc0 + BPF_XCHG = 0xe1 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 BTRFS_SUPER_MAGIC = 0x9123683e BTRFS_TEST_MAGIC = 0x73727279 + BUS_BLUETOOTH = 0x5 + BUS_HIL = 0x4 + BUS_USB = 0x3 + BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -313,6 +322,7 @@ const ( CAN_J1939 = 0x7 CAN_MAX_DLC = 0x8 CAN_MAX_DLEN = 0x8 + CAN_MAX_RAW_DLC = 0xf CAN_MCNET = 0x5 CAN_MTU = 0x10 CAN_NPROTO = 0x8 @@ -484,9 +494,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2020-10-01)" + DM_VERSION_EXTRA = "-ioctl (2021-02-01)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2b + DM_VERSION_MINOR = 0x2c DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -664,6 +674,7 @@ const ( ETH_P_CAIF = 0xf7 ETH_P_CAN = 0xc ETH_P_CANFD = 0xd + ETH_P_CFM = 0x8902 ETH_P_CONTROL = 0x16 ETH_P_CUST = 0x6006 ETH_P_DDCMP = 0x6 @@ -834,7 +845,6 @@ const ( FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1 FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3 - FSCRYPT_POLICY_FLAGS_VALID = 0x1f FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4 FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10 FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8 @@ -854,6 +864,7 @@ const ( FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 FS_IOC_MEASURE_VERITY = 0xc0046686 + FS_IOC_READ_VERITY_METADATA = 0xc0286687 FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618 FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619 FS_KEY_DESCRIPTOR_SIZE = 0x8 @@ -865,10 +876,13 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x1f + FS_POLICY_FLAGS_VALID = 0x7 FS_VERITY_FL = 0x100000 FS_VERITY_HASH_ALG_SHA256 = 0x1 FS_VERITY_HASH_ALG_SHA512 = 0x2 + FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 + FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 + FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -962,11 +976,17 @@ const ( HDIO_SET_XFER = 0x306 HDIO_TRISTATE_HWIF = 0x31b HDIO_UNREGISTER_HWIF = 0x32a + HID_MAX_DESCRIPTOR_SIZE = 0x1000 HOSTFS_SUPER_MAGIC = 0xc0ffee HPFS_SUPER_MAGIC = 0xf995e849 HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 + ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 @@ -1138,6 +1158,7 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVERR_RFC4884 = 0x1f IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 @@ -1202,6 +1223,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 @@ -1384,6 +1406,10 @@ const ( MCAST_LEAVE_SOURCE_GROUP = 0x2f MCAST_MSFILTER = 0x30 MCAST_UNBLOCK_SOURCE = 0x2c + MEMGETREGIONINFO = 0xc0104d08 + MEMREADOOB64 = 0xc0184d16 + MEMWRITE = 0xc0304d18 + MEMWRITEOOB64 = 0xc0184d15 MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 MFD_HUGETLB = 0x4 @@ -1472,7 +1498,35 @@ const ( 
MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_ABSENT = 0x0 + MTD_BIT_WRITEABLE = 0x800 + MTD_CAP_NANDFLASH = 0x400 + MTD_CAP_NORFLASH = 0xc00 + MTD_CAP_NVRAM = 0x1c00 + MTD_CAP_RAM = 0x1c00 + MTD_CAP_ROM = 0x0 + MTD_DATAFLASH = 0x6 MTD_INODE_FS_MAGIC = 0x11307854 + MTD_MAX_ECCPOS_ENTRIES = 0x40 + MTD_MAX_OOBFREE_ENTRIES = 0x8 + MTD_MLCNANDFLASH = 0x8 + MTD_NANDECC_AUTOPLACE = 0x2 + MTD_NANDECC_AUTOPL_USR = 0x4 + MTD_NANDECC_OFF = 0x0 + MTD_NANDECC_PLACE = 0x1 + MTD_NANDECC_PLACEONLY = 0x3 + MTD_NANDFLASH = 0x4 + MTD_NORFLASH = 0x3 + MTD_NO_ERASE = 0x1000 + MTD_OTP_FACTORY = 0x1 + MTD_OTP_OFF = 0x0 + MTD_OTP_USER = 0x2 + MTD_POWERUP_LOCK = 0x2000 + MTD_RAM = 0x1 + MTD_ROM = 0x2 + MTD_SLC_ON_MLC_EMULATION = 0x4000 + MTD_UBIVOLUME = 0x7 + MTD_WRITEABLE = 0x400 NAME_MAX = 0xff NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 @@ -1659,6 +1713,10 @@ const ( PERF_FLAG_PID_CGROUP = 0x4 PERF_MAX_CONTEXTS_PER_STACK = 0x8 PERF_MAX_STACK_DEPTH = 0x7f + PERF_MEM_BLK_ADDR = 0x4 + PERF_MEM_BLK_DATA = 0x2 + PERF_MEM_BLK_NA = 0x1 + PERF_MEM_BLK_SHIFT = 0x28 PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 @@ -1722,12 +1780,14 @@ const ( PERF_RECORD_MISC_GUEST_USER = 0x5 PERF_RECORD_MISC_HYPERVISOR = 0x3 PERF_RECORD_MISC_KERNEL = 0x1 + PERF_RECORD_MISC_MMAP_BUILD_ID = 0x4000 PERF_RECORD_MISC_MMAP_DATA = 0x2000 PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 0x1000 PERF_RECORD_MISC_SWITCH_OUT = 0x2000 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT = 0x4000 PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 + PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 PIPEFS_MAGIC = 0x50495045 PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c @@ -1840,6 +1900,7 @@ const ( PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_SPECULATION_CTRL = 0x35 + PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d @@ -1859,6 +1920,8 @@ const ( PR_SVE_SET_VL_ONEXEC = 0x40000 PR_SVE_VL_INHERIT = 0x20000 PR_SVE_VL_LEN_MASK = 0xffff + PR_SYS_DISPATCH_OFF = 0x0 + PR_SYS_DISPATCH_ON = 0x1 PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 @@ -1978,6 +2041,10 @@ const ( RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 RTC_AF = 0x20 + RTC_FEATURE_ALARM = 0x0 + RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_CNT = 0x3 + RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_IRQF = 0x80 RTC_MAX_FREQ = 0x2000 RTC_PF = 0x40 @@ -2051,6 +2118,7 @@ const ( RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_OFFLOAD = 0x4000 + RTM_F_OFFLOAD_FAILED = 0x20000000 RTM_F_PREFIX = 0x800 RTM_F_TRAP = 0x8000 RTM_GETACTION = 0x32 @@ -2104,12 +2172,13 @@ const ( RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 + RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 RTNH_F_LINKDOWN = 0x10 RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_TRAP = 0x40 RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a @@ -2580,6 +2649,7 @@ const ( VMADDR_CID_HOST = 0x2 VMADDR_CID_HYPERVISOR = 0x0 VMADDR_CID_LOCAL = 0x1 + VMADDR_FLAG_TO_HOST = 0x1 VMADDR_PORT_ANY = 0xffffffff VM_SOCKETS_INVALID_VERSION = 0xffffffff VQUIT = 0x1 @@ -2727,6 +2797,9 @@ const ( Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 ZSMALLOC_MAGIC = 0x58295829 + _HIDIOCGRAWNAME_LEN = 0x80 + _HIDIOCGRAWPHYS_LEN = 0x40 + _HIDIOCGRAWUNIQ_LEN = 0x40 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 336e0b326a..09fc559ed2 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -93,6 +96,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -119,6 +125,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x40087447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -481,6 +508,9 @@ const ( X86_FXSR_MAGIC = 0x0 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 961507e937..75730cc22b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -93,6 +96,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -119,6 +125,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_ARCH_PRCTL = 0x1e @@ -269,6 +294,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -291,6 +317,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -481,6 +508,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a65576db7b..127cf17add 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x40087447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETCRUNCHREGS = 0x19 @@ -275,6 +300,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -297,6 +323,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -487,6 +514,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index cf075caa8c..957ca1ff13 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -95,6 +98,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -120,6 +126,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -129,6 +148,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -166,6 +189,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -193,8 +217,10 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PROT_BTI = 0x10 + PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 @@ -264,6 +290,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -286,6 +313,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -477,6 +505,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index efe90deeab..314a2054fc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80087447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 8b0e8911dc..457e8de97d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index e9430cd1a2..33cd28f6bd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 61e4f5db67..0e085ba147 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80087447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go new file mode 100644 index 0000000000..1b5928cffb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -0,0 +1,879 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc && linux +// +build ppc,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x17 + B115200 = 0x11 + B1152000 = 0x18 + B1500000 = 0x19 + B2000000 = 0x1a + B230400 = 0x12 + B2500000 = 0x1b + B3000000 = 0x1c + B3500000 = 0x1d + B4000000 = 0x1e + B460800 = 0x13 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B921600 = 0x16 + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000000 + FF1 = 0x4000 + FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d + FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 + HUPCL = 0x4000 + ICANON = 0x100 + IEXTEN = 0x400 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x80 + IUCLC = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 + NFDBITS = 0x20 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x4 + ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x10000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 
0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x1000 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 + PPPIOCXFERUNIT = 0x2000744e + PROT_SAO = 0x10 + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS64 = 0x16 + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETREGS64 = 0x17 + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SYSEMU = 0x1d + PTRACE_SYSEMU_SINGLESTEP = 0x1e + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPR31 = 0x6e + PT_FPSCR = 0x71 + PT_LNK = 0x24 + PT_MQ = 0x27 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_TRAP = 0x28 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + 
RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 
0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VMIN = 0x5 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x20 + XCASE = 0x4000 + XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = 
syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child 
processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {58, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", 
"protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 973ad93463..f3a41d6ecb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff @@ -327,6 +352,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -349,6 +375,7 @@ const ( SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -543,6 +570,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4000 XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 70a7406ba1..6a5a555d5e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff @@ -327,6 +352,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -349,6 +375,7 @@ const ( SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -543,6 +570,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4000 XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index b1bf7997cb..a4da67edbb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff RLIMIT_AS = 0x9 @@ -256,6 +281,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -278,6 +304,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -468,6 +495,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 7053d10ba0..a7028e0efb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_DISABLE_TE = 0x5010 @@ -206,6 +231,8 @@ const ( PTRACE_POKE_SYSTEM_CALL = 0x5008 PTRACE_PROT = 0x15 PTRACE_SINGLEBLOCK = 0xc + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TE_ABORT_RAND = 0x5011 PT_ACR0 = 0x90 PT_ACR1 = 0x94 @@ -329,6 +356,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -351,6 +379,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -541,6 +570,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 137cfe7962..ed3b3286c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -62,6 +63,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -96,6 +99,9 @@ const ( F_SETOWN = 0x6 F_UNLCK = 0x3 F_WRLCK = 0x2 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -122,6 +128,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -131,6 +150,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x40 O_CLOEXEC = 0x400000 @@ -168,6 +191,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -195,6 +219,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_GETFPAREGS = 0x14 @@ -322,6 +347,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 SO_BUSY_POLL = 0x30 + SO_BUSY_POLL_BUDGET = 0x49 SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 @@ -344,6 +370,7 @@ const ( SO_PEERCRED = 0x40 SO_PEERGROUPS = 0x3d SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b @@ -531,6 +558,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 __TIOCFLUSH = 0x80047410 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 20f3a5799f..72f7420d20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 90b8fcd29c..8d4eb0c080 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && netbsd // +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index c5c03993b6..9eef9749f6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh -marm // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 14dd3c1d1e..3b62ba192c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && netbsd // +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index c865a10df4..593cc0feff 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 9db6b2fb6e..25cb609481 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7072526a64..a4e4c22314 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ac5efbe5ac..90de7dfc33 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index a74639a46f..f1154ff56f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 5312c36cc8..1afee6a089 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && solaris // +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -365,6 +366,7 @@ const ( HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 + ICMP6_FILTER = 0x1 ICRNL = 0x100 IEXTEN = 0x8000 IFF_ADDRCONF = 0x80000 @@ -611,6 +613,7 @@ const ( IP_RECVPKTINFO = 0x1a IP_RECVRETOPTS = 0x6 IP_RECVSLLA = 0xa + IP_RECVTOS = 0xc IP_RECVTTL = 0xb IP_RETOPTS = 0x8 IP_REUSEADDR = 0x104 @@ -703,6 +706,7 @@ const ( O_APPEND = 0x8 O_CLOEXEC = 0x800000 O_CREAT = 0x100 + O_DIRECT = 0x2000000 O_DIRECTORY = 0x1000000 O_DSYNC = 0x40 O_EXCL = 0x400 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go new file mode 100644 index 0000000000..fc7d0506f6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -0,0 +1,860 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Hand edited based on zerrors_linux_s390x.go +// TODO: auto-generate. + +package unix + +const ( + BRKINT = 0x0001 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + IP6F_MORE_FRAG = 0x0001 + IP6F_OFF_MASK = 0xfff8 + IP6F_RESERVED_MASK = 0x0006 + IP6OPT_JUMBO = 0xc2 + IP6OPT_JUMBO_LEN = 6 + IP6OPT_MUTABLE = 0x20 + IP6OPT_NSAP_ADDR = 0xc3 + IP6OPT_PAD1 = 0x00 + IP6OPT_PADN = 0x01 + IP6OPT_ROUTER_ALERT = 0x05 + IP6OPT_TUNNEL_LIMIT = 0x04 + IP6OPT_TYPE_DISCARD = 0x40 + IP6OPT_TYPE_FORCEICMP = 0x80 + IP6OPT_TYPE_ICMP = 0xc0 + IP6OPT_TYPE_SKIP = 0x00 + IP6_ALERT_AN = 0x0002 + IP6_ALERT_MLD = 0x0000 + IP6_ALERT_RSVP = 0x0001 + IPPORT_RESERVED = 1024 + IPPORT_USERRESERVED = 5000 + IPPROTO_AH = 51 + SOL_AH = 51 + IPPROTO_DSTOPTS = 60 + SOL_DSTOPTS = 60 + IPPROTO_EGP = 8 + SOL_EGP = 8 + IPPROTO_ESP = 50 + SOL_ESP = 50 + IPPROTO_FRAGMENT = 44 + SOL_FRAGMENT = 44 + IPPROTO_GGP = 2 + SOL_GGP = 2 + IPPROTO_HOPOPTS = 0 + SOL_HOPOPTS = 0 + IPPROTO_ICMP = 1 + SOL_ICMP = 1 + IPPROTO_ICMPV6 = 58 + SOL_ICMPV6 = 58 + IPPROTO_IDP = 22 + SOL_IDP = 22 + IPPROTO_IP = 0 + SOL_IP = 0 + IPPROTO_IPV6 = 41 + SOL_IPV6 = 41 + IPPROTO_MAX = 256 + SOL_MAX = 256 + IPPROTO_NONE = 59 + SOL_NONE = 59 + IPPROTO_PUP = 12 + SOL_PUP = 12 + IPPROTO_RAW = 255 + SOL_RAW = 255 + IPPROTO_ROUTING = 43 + SOL_ROUTING = 43 + IPPROTO_TCP = 6 + SOL_TCP = 6 + IPPROTO_UDP = 17 + SOL_UDP = 17 + IPV6_ADDR_PREFERENCES = 32 + IPV6_CHECKSUM = 19 + IPV6_DONTFRAG = 29 + IPV6_DSTOPTS = 23 + IPV6_HOPLIMIT = 11 + IPV6_HOPOPTS = 22 + IPV6_JOIN_GROUP = 5 + IPV6_LEAVE_GROUP = 6 + IPV6_MULTICAST_HOPS = 9 + IPV6_MULTICAST_IF = 7 + IPV6_MULTICAST_LOOP = 4 + IPV6_NEXTHOP = 20 + IPV6_PATHMTU = 12 + IPV6_PKTINFO = 13 + IPV6_PREFER_SRC_CGA = 0x10 + IPV6_PREFER_SRC_COA = 0x02 + IPV6_PREFER_SRC_HOME = 0x01 + IPV6_PREFER_SRC_NONCGA = 0x20 + IPV6_PREFER_SRC_PUBLIC = 0x08 + IPV6_PREFER_SRC_TMP = 0x04 + IPV6_RECVDSTOPTS = 28 + IPV6_RECVHOPLIMIT = 14 + IPV6_RECVHOPOPTS = 26 + IPV6_RECVPATHMTU = 16 + IPV6_RECVPKTINFO = 15 + IPV6_RECVRTHDR = 25 + IPV6_RECVTCLASS = 31 + 
IPV6_RTHDR = 21 + IPV6_RTHDRDSTOPTS = 24 + IPV6_RTHDR_TYPE_0 = 0 + IPV6_TCLASS = 30 + IPV6_UNICAST_HOPS = 3 + IPV6_USE_MIN_MTU = 18 + IPV6_V6ONLY = 10 + IP_ADD_MEMBERSHIP = 5 + IP_ADD_SOURCE_MEMBERSHIP = 12 + IP_BLOCK_SOURCE = 10 + IP_DEFAULT_MULTICAST_LOOP = 1 + IP_DEFAULT_MULTICAST_TTL = 1 + IP_DROP_MEMBERSHIP = 6 + IP_DROP_SOURCE_MEMBERSHIP = 13 + IP_MAX_MEMBERSHIPS = 20 + IP_MULTICAST_IF = 7 + IP_MULTICAST_LOOP = 4 + IP_MULTICAST_TTL = 3 + IP_OPTIONS = 1 + IP_PKTINFO = 101 + IP_RECVPKTINFO = 102 + IP_TOS = 2 + IP_TTL = 3 + IP_UNBLOCK_SOURCE = 11 + ICANON = 0x0010 + ICMP6_FILTER = 0x26 + ICRNL = 0x0002 + IEXTEN = 0x0020 + IGNBRK = 0x0004 + IGNCR = 0x0008 + INLCR = 0x0020 + ISIG = 0x0040 + ISTRIP = 0x0080 + IXON = 0x0200 + IXOFF = 0x0100 + LOCK_SH = 0x1 // Not exist on zOS + LOCK_EX = 0x2 // Not exist on zOS + LOCK_NB = 0x4 // Not exist on zOS + LOCK_UN = 0x8 // Not exist on zOS + POLLIN = 0x0003 + POLLOUT = 0x0004 + POLLPRI = 0x0010 + POLLERR = 0x0020 + POLLHUP = 0x0040 + POLLNVAL = 0x0080 + PROT_READ = 0x1 // mmap - page can be read + PROT_WRITE = 0x2 // page can be written + PROT_NONE = 0x4 // can't be accessed + PROT_EXEC = 0x8 // can be executed + MAP_PRIVATE = 0x1 // changes are private + MAP_SHARED = 0x2 // changes are shared + MAP_FIXED = 0x4 // place exactly + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 45 + MS_SYNC = 0x1 // msync - synchronous writes + MS_ASYNC = 0x2 // asynchronous writes + MS_INVALIDATE = 0x4 // invalidate mappings + MTM_RDONLY = 0x80000000 + MTM_RDWR = 0x40000000 + MTM_UMOUNT = 0x10000000 + MTM_IMMED = 0x08000000 + MTM_FORCE = 0x04000000 + MTM_DRAIN = 0x02000000 + MTM_RESET = 0x01000000 + MTM_SAMEMODE = 0x00100000 + MTM_UNQSEFORCE = 0x00040000 + MTM_NOSUID = 0x00000400 + MTM_SYNCHONLY = 0x00000200 + MTM_REMOUNT = 0x00000100 + MTM_NOSECURITY = 0x00000080 + NFDBITS = 0x20 + O_ACCMODE = 0x03 + O_APPEND = 0x08 + O_ASYNCSIG = 0x0200 + O_CREAT = 0x80 + O_EXCL = 0x40 + O_GETFL = 0x0F + O_LARGEFILE = 0x0400 + O_NONBLOCK = 0x04 + O_RDONLY = 0x02 + O_RDWR = 0x03 + O_SYNC = 0x0100 + O_TRUNC = 0x10 + O_WRONLY = 0x01 + O_NOCTTY = 0x20 + OPOST = 0x0001 + ONLCR = 0x0004 + PARENB = 0x0200 + PARMRK = 0x0400 + QUERYCVT = 3 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 // RUSAGE_THREAD unsupported on z/OS + SEEK_CUR = 1 + SEEK_END = 2 + SEEK_SET = 0 + SETAUTOCVTALL = 5 + SETAUTOCVTON = 2 + SETCVTALL = 4 + SETCVTOFF = 0 + SETCVTON = 1 + AF_APPLETALK = 16 + AF_CCITT = 10 + AF_CHAOS = 5 + AF_DATAKIT = 9 + AF_DLI = 13 + AF_ECMA = 8 + AF_HYLINK = 15 + AF_IMPLINK = 3 + AF_INET = 2 + AF_INET6 = 19 + AF_INTF = 20 + AF_IUCV = 17 + AF_LAT = 14 + AF_LINK = 18 + AF_MAX = 30 + AF_NBS = 7 + AF_NDD = 23 + AF_NETWARE = 22 + AF_NS = 6 + AF_PUP = 4 + AF_RIF = 21 + AF_ROUTE = 20 + AF_SNA = 11 + AF_UNIX = 1 + AF_UNSPEC = 0 + IBMTCP_IMAGE = 1 + MSG_ACK_EXPECTED = 0x10 + MSG_ACK_GEN = 0x40 + MSG_ACK_TIMEOUT = 0x20 + MSG_CONNTERM = 0x80 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_EOF = 0x8000 + MSG_EOR = 0x8 + MSG_MAXIOVLEN = 16 + MSG_NONBLOCK = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + PRIO_PROCESS = 1 + PRIO_PGRP = 2 + PRIO_USER = 3 + RLIMIT_CPU = 0 + RLIMIT_FSIZE = 1 + RLIMIT_DATA = 2 + RLIMIT_STACK = 3 + RLIMIT_CORE = 4 + RLIMIT_AS = 5 + RLIMIT_NOFILE = 6 + RLIMIT_MEMLIMIT = 7 + RLIM_INFINITY = 2147483647 + SCM_RIGHTS = 0x01 + SF_CLOSE = 0x00000002 + SF_REUSE = 0x00000001 + SHUT_RD = 0 + SHUT_RDWR = 2 + SHUT_WR = 1 + SOCK_CONN_DGRAM = 6 + 
SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_RDM = 4 + SOCK_SEQPACKET = 5 + SOCK_STREAM = 1 + SOL_SOCKET = 0xffff + SOMAXCONN = 10 + SO_ACCEPTCONN = 0x0002 + SO_ACCEPTECONNABORTED = 0x0006 + SO_ACKNOW = 0x7700 + SO_BROADCAST = 0x0020 + SO_BULKMODE = 0x8000 + SO_CKSUMRECV = 0x0800 + SO_CLOSE = 0x01 + SO_CLUSTERCONNTYPE = 0x00004001 + SO_CLUSTERCONNTYPE_INTERNAL = 8 + SO_CLUSTERCONNTYPE_NOCONN = 0 + SO_CLUSTERCONNTYPE_NONE = 1 + SO_CLUSTERCONNTYPE_SAME_CLUSTER = 2 + SO_CLUSTERCONNTYPE_SAME_IMAGE = 4 + SO_DEBUG = 0x0001 + SO_DONTROUTE = 0x0010 + SO_ERROR = 0x1007 + SO_IGNOREINCOMINGPUSH = 0x1 + SO_IGNORESOURCEVIPA = 0x0002 + SO_KEEPALIVE = 0x0008 + SO_LINGER = 0x0080 + SO_NONBLOCKLOCAL = 0x8001 + SO_NOREUSEADDR = 0x1000 + SO_OOBINLINE = 0x0100 + SO_OPTACK = 0x8004 + SO_OPTMSS = 0x8003 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x0004 + SO_REUSEPORT = 0x0200 + SO_SECINFO = 0x00004002 + SO_SET = 0x0200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TYPE = 0x1008 + SO_UNSET = 0x0400 + SO_USELOOPBACK = 0x0040 + SO_USE_IFBUFS = 0x0400 + S_ISUID = 0x0800 + S_ISGID = 0x0400 + S_ISVTX = 0x0200 + S_IRUSR = 0x0100 + S_IWUSR = 0x0080 + S_IXUSR = 0x0040 + S_IRWXU = 0x01C0 + S_IRGRP = 0x0020 + S_IWGRP = 0x0010 + S_IXGRP = 0x0008 + S_IRWXG = 0x0038 + S_IROTH = 0x0004 + S_IWOTH = 0x0002 + S_IXOTH = 0x0001 + S_IRWXO = 0x0007 + S_IREAD = S_IRUSR + S_IWRITE = S_IWUSR + S_IEXEC = S_IXUSR + S_IFDIR = 0x01000000 + S_IFCHR = 0x02000000 + S_IFREG = 0x03000000 + S_IFFIFO = 0x04000000 + S_IFIFO = 0x04000000 + S_IFLNK = 0x05000000 + S_IFBLK = 0x06000000 + S_IFSOCK = 0x07000000 + S_IFVMEXTL = 0xFE000000 + S_IFVMEXTL_EXEC = 0x00010000 + S_IFVMEXTL_DATA = 0x00020000 + S_IFVMEXTL_MEL = 0x00030000 + S_IFEXTL = 0x00000001 + S_IFPROGCTL = 0x00000002 + S_IFAPFCTL = 0x00000004 + S_IFNOSHARE = 0x00000008 + S_IFSHARELIB = 0x00000010 + S_IFMT = 0xFF000000 + S_IFMST = 0x00FF0000 + TCP_KEEPALIVE = 0x8 + TCP_NODELAY = 0x1 + TCP_INFO = 0xb + TCP_USER_TIMEOUT = 0x1 + TIOCGWINSZ = 0x4008a368 + TIOCSWINSZ = 0x8008a367 + TIOCSBRK = 0x2000a77b + TIOCCBRK = 0x2000a77a + TIOCSTI = 0x8001a772 + TIOCGPGRP = 0x4004a777 // _IOR(167, 119, int) + TCSANOW = 0 + TCSETS = 0 // equivalent to TCSANOW for tcsetattr + TCSADRAIN = 1 + TCSETSW = 1 // equivalent to TCSADRAIN for tcsetattr + TCSAFLUSH = 2 + TCSETSF = 2 // equivalent to TCSAFLUSH for tcsetattr + TCGETS = 3 // not defined in ioctl.h -- zos golang only + TCIFLUSH = 0 + TCOFLUSH = 1 + TCIOFLUSH = 2 + TCOOFF = 0 + TCOON = 1 + TCIOFF = 2 + TCION = 3 + TIOCSPGRP = 0x8004a776 + TIOCNOTTY = 0x2000a771 + TIOCEXCL = 0x2000a70d + TIOCNXCL = 0x2000a70e + TIOCGETD = 0x4004a700 + TIOCSETD = 0x8004a701 + TIOCPKT = 0x8004a770 + TIOCSTOP = 0x2000a76f + TIOCSTART = 0x2000a76e + TIOCUCNTL = 0x8004a766 + TIOCREMOTE = 0x8004a769 + TIOCMGET = 0x4004a76a + TIOCMSET = 0x8004a76d + TIOCMBIC = 0x8004a76b + TIOCMBIS = 0x8004a76c + VINTR = 0 + VQUIT = 1 + VERASE = 2 + VKILL = 3 + VEOF = 4 + VEOL = 5 + VMIN = 6 + VSTART = 7 + VSTOP = 8 + VSUSP = 9 + VTIME = 10 + WCONTINUED = 0x4 + WNOHANG = 0x1 + WUNTRACED = 0x2 + _BPX_SWAP = 1 + _BPX_NONSWAP = 2 + MCL_CURRENT = 1 // for Linux compatibility -- no zos semantics + MCL_FUTURE = 2 // for Linux compatibility -- no zos semantics + MCL_ONFAULT = 3 // for Linux compatibility -- no zos semantics + MADV_NORMAL = 0 // for Linux compatibility -- no zos semantics + MADV_RANDOM = 1 // for Linux compatibility -- no zos semantics + MADV_SEQUENTIAL = 2 // for Linux compatibility -- no zos semantics + MADV_WILLNEED = 3 // for 
Linux compatibility -- no zos semantics + MADV_REMOVE = 4 // for Linux compatibility -- no zos semantics + MADV_DONTFORK = 5 // for Linux compatibility -- no zos semantics + MADV_DOFORK = 6 // for Linux compatibility -- no zos semantics + MADV_HWPOISON = 7 // for Linux compatibility -- no zos semantics + MADV_MERGEABLE = 8 // for Linux compatibility -- no zos semantics + MADV_UNMERGEABLE = 9 // for Linux compatibility -- no zos semantics + MADV_SOFT_OFFLINE = 10 // for Linux compatibility -- no zos semantics + MADV_HUGEPAGE = 11 // for Linux compatibility -- no zos semantics + MADV_NOHUGEPAGE = 12 // for Linux compatibility -- no zos semantics + MADV_DONTDUMP = 13 // for Linux compatibility -- no zos semantics + MADV_DODUMP = 14 // for Linux compatibility -- no zos semantics + MADV_FREE = 15 // for Linux compatibility -- no zos semantics + MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics + MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics + AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics + AT_FDCWD = 2 // for Unix compatibility -- no zos semantics +) + +const ( + EDOM = Errno(1) + ERANGE = Errno(2) + EACCES = Errno(111) + EAGAIN = Errno(112) + EBADF = Errno(113) + EBUSY = Errno(114) + ECHILD = Errno(115) + EDEADLK = Errno(116) + EEXIST = Errno(117) + EFAULT = Errno(118) + EFBIG = Errno(119) + EINTR = Errno(120) + EINVAL = Errno(121) + EIO = Errno(122) + EISDIR = Errno(123) + EMFILE = Errno(124) + EMLINK = Errno(125) + ENAMETOOLONG = Errno(126) + ENFILE = Errno(127) + ENODEV = Errno(128) + ENOENT = Errno(129) + ENOEXEC = Errno(130) + ENOLCK = Errno(131) + ENOMEM = Errno(132) + ENOSPC = Errno(133) + ENOSYS = Errno(134) + ENOTDIR = Errno(135) + ENOTEMPTY = Errno(136) + ENOTTY = Errno(137) + ENXIO = Errno(138) + EPERM = Errno(139) + EPIPE = Errno(140) + EROFS = Errno(141) + ESPIPE = Errno(142) + ESRCH = Errno(143) + EXDEV = Errno(144) + E2BIG = Errno(145) + ELOOP = Errno(146) + EILSEQ = Errno(147) + ENODATA = Errno(148) + EOVERFLOW = Errno(149) + EMVSNOTUP = Errno(150) + ECMSSTORAGE = Errno(151) + EMVSDYNALC = Errno(151) + EMVSCVAF = Errno(152) + EMVSCATLG = Errno(153) + ECMSINITIAL = Errno(156) + EMVSINITIAL = Errno(156) + ECMSERR = Errno(157) + EMVSERR = Errno(157) + EMVSPARM = Errno(158) + ECMSPFSFILE = Errno(159) + EMVSPFSFILE = Errno(159) + EMVSBADCHAR = Errno(160) + ECMSPFSPERM = Errno(162) + EMVSPFSPERM = Errno(162) + EMVSSAFEXTRERR = Errno(163) + EMVSSAF2ERR = Errno(164) + EMVSTODNOTSET = Errno(165) + EMVSPATHOPTS = Errno(166) + EMVSNORTL = Errno(167) + EMVSEXPIRE = Errno(168) + EMVSPASSWORD = Errno(169) + EMVSWLMERROR = Errno(170) + EMVSCPLERROR = Errno(171) + EMVSARMERROR = Errno(172) + ELENOFORK = Errno(200) + ELEMSGERR = Errno(201) + EFPMASKINV = Errno(202) + EFPMODEINV = Errno(203) + EBUFLEN = Errno(227) + EEXTLINK = Errno(228) + ENODD = Errno(229) + ECMSESMERR = Errno(230) + ECPERR = Errno(231) + ELEMULTITHREAD = Errno(232) + ELEFENCE = Errno(244) + EBADDATA = Errno(245) + EUNKNOWN = Errno(246) + ENOTSUP = Errno(247) + EBADNAME = Errno(248) + ENOTSAFE = Errno(249) + ELEMULTITHREADFORK = Errno(257) + ECUNNOENV = Errno(258) + ECUNNOCONV = Errno(259) + ECUNNOTALIGNED = Errno(260) + ECUNERR = Errno(262) + EIBMBADCALL = Errno(1000) + EIBMBADPARM = Errno(1001) + EIBMSOCKOUTOFRANGE = Errno(1002) + EIBMSOCKINUSE = Errno(1003) + EIBMIUCVERR = Errno(1004) + EOFFLOADboxERROR = Errno(1005) + EOFFLOADboxRESTART = Errno(1006) + EOFFLOADboxDOWN = Errno(1007) + EIBMCONFLICT = Errno(1008) + EIBMCANCELLED = Errno(1009) + EIBMBADTCPNAME = 
Errno(1011) + ENOTBLK = Errno(1100) + ETXTBSY = Errno(1101) + EWOULDBLOCK = Errno(1102) + EINPROGRESS = Errno(1103) + EALREADY = Errno(1104) + ENOTSOCK = Errno(1105) + EDESTADDRREQ = Errno(1106) + EMSGSIZE = Errno(1107) + EPROTOTYPE = Errno(1108) + ENOPROTOOPT = Errno(1109) + EPROTONOSUPPORT = Errno(1110) + ESOCKTNOSUPPORT = Errno(1111) + EOPNOTSUPP = Errno(1112) + EPFNOSUPPORT = Errno(1113) + EAFNOSUPPORT = Errno(1114) + EADDRINUSE = Errno(1115) + EADDRNOTAVAIL = Errno(1116) + ENETDOWN = Errno(1117) + ENETUNREACH = Errno(1118) + ENETRESET = Errno(1119) + ECONNABORTED = Errno(1120) + ECONNRESET = Errno(1121) + ENOBUFS = Errno(1122) + EISCONN = Errno(1123) + ENOTCONN = Errno(1124) + ESHUTDOWN = Errno(1125) + ETOOMANYREFS = Errno(1126) + ETIMEDOUT = Errno(1127) + ECONNREFUSED = Errno(1128) + EHOSTDOWN = Errno(1129) + EHOSTUNREACH = Errno(1130) + EPROCLIM = Errno(1131) + EUSERS = Errno(1132) + EDQUOT = Errno(1133) + ESTALE = Errno(1134) + EREMOTE = Errno(1135) + ENOSTR = Errno(1136) + ETIME = Errno(1137) + ENOSR = Errno(1138) + ENOMSG = Errno(1139) + EBADMSG = Errno(1140) + EIDRM = Errno(1141) + ENONET = Errno(1142) + ERREMOTE = Errno(1143) + ENOLINK = Errno(1144) + EADV = Errno(1145) + ESRMNT = Errno(1146) + ECOMM = Errno(1147) + EPROTO = Errno(1148) + EMULTIHOP = Errno(1149) + EDOTDOT = Errno(1150) + EREMCHG = Errno(1151) + ECANCELED = Errno(1152) + EINTRNODATA = Errno(1159) + ENOREUSE = Errno(1160) + ENOMOVE = Errno(1161) +) + +// Signals +const ( + SIGHUP = Signal(1) + SIGINT = Signal(2) + SIGABRT = Signal(3) + SIGILL = Signal(4) + SIGPOLL = Signal(5) + SIGURG = Signal(6) + SIGSTOP = Signal(7) + SIGFPE = Signal(8) + SIGKILL = Signal(9) + SIGBUS = Signal(10) + SIGSEGV = Signal(11) + SIGSYS = Signal(12) + SIGPIPE = Signal(13) + SIGALRM = Signal(14) + SIGTERM = Signal(15) + SIGUSR1 = Signal(16) + SIGUSR2 = Signal(17) + SIGABND = Signal(18) + SIGCONT = Signal(19) + SIGCHLD = Signal(20) + SIGTTIN = Signal(21) + SIGTTOU = Signal(22) + SIGIO = Signal(23) + SIGQUIT = Signal(24) + SIGTSTP = Signal(25) + SIGTRAP = Signal(26) + SIGIOERR = Signal(27) + SIGWINCH = Signal(28) + SIGXCPU = Signal(29) + SIGXFSZ = Signal(30) + SIGVTALRM = Signal(31) + SIGPROF = Signal(32) + SIGDANGER = Signal(33) + SIGTHSTOP = Signal(34) + SIGTHCONT = Signal(35) + SIGTRACE = Signal(37) + SIGDCE = Signal(38) + SIGDUMP = Signal(39) +) + +// Error table +var errorList = [...]struct { + num Errno + name string + desc string +}{ + {1, "EDC5001I", "A domain error occurred."}, + {2, "EDC5002I", "A range error occurred."}, + {111, "EDC5111I", "Permission denied."}, + {112, "EDC5112I", "Resource temporarily unavailable."}, + {113, "EDC5113I", "Bad file descriptor."}, + {114, "EDC5114I", "Resource busy."}, + {115, "EDC5115I", "No child processes."}, + {116, "EDC5116I", "Resource deadlock avoided."}, + {117, "EDC5117I", "File exists."}, + {118, "EDC5118I", "Incorrect address."}, + {119, "EDC5119I", "File too large."}, + {120, "EDC5120I", "Interrupted function call."}, + {121, "EDC5121I", "Invalid argument."}, + {122, "EDC5122I", "Input/output error."}, + {123, "EDC5123I", "Is a directory."}, + {124, "EDC5124I", "Too many open files."}, + {125, "EDC5125I", "Too many links."}, + {126, "EDC5126I", "Filename too long."}, + {127, "EDC5127I", "Too many open files in system."}, + {128, "EDC5128I", "No such device."}, + {129, "EDC5129I", "No such file or directory."}, + {130, "EDC5130I", "Exec format error."}, + {131, "EDC5131I", "No locks available."}, + {132, "EDC5132I", "Not enough memory."}, + {133, "EDC5133I", "No space left on 
device."}, + {134, "EDC5134I", "Function not implemented."}, + {135, "EDC5135I", "Not a directory."}, + {136, "EDC5136I", "Directory not empty."}, + {137, "EDC5137I", "Inappropriate I/O control operation."}, + {138, "EDC5138I", "No such device or address."}, + {139, "EDC5139I", "Operation not permitted."}, + {140, "EDC5140I", "Broken pipe."}, + {141, "EDC5141I", "Read-only file system."}, + {142, "EDC5142I", "Invalid seek."}, + {143, "EDC5143I", "No such process."}, + {144, "EDC5144I", "Improper link."}, + {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, + {146, "EDC5146I", "Too many levels of symbolic links."}, + {147, "EDC5147I", "Illegal byte sequence."}, + {148, "", ""}, + {149, "EDC5149I", "Value Overflow Error."}, + {150, "EDC5150I", "UNIX System Services is not active."}, + {151, "EDC5151I", "Dynamic allocation error."}, + {152, "EDC5152I", "Common VTOC access facility (CVAF) error."}, + {153, "EDC5153I", "Catalog obtain error."}, + {156, "EDC5156I", "Process initialization error."}, + {157, "EDC5157I", "An internal error has occurred."}, + {158, "EDC5158I", "Bad parameters were passed to the service."}, + {159, "EDC5159I", "The Physical File System encountered a permanent file error."}, + {160, "EDC5160I", "Bad character in environment variable name."}, + {162, "EDC5162I", "The Physical File System encountered a system error."}, + {163, "EDC5163I", "SAF/RACF extract error."}, + {164, "EDC5164I", "SAF/RACF error."}, + {165, "EDC5165I", "System TOD clock not set."}, + {166, "EDC5166I", "Access mode argument on function call conflicts with PATHOPTS parameter on JCL DD statement."}, + {167, "EDC5167I", "Access to the UNIX System Services version of the C RTL is denied."}, + {168, "EDC5168I", "Password has expired."}, + {169, "EDC5169I", "Password is invalid."}, + {170, "EDC5170I", "An error was encountered with WLM."}, + {171, "EDC5171I", "An error was encountered with CPL."}, + {172, "EDC5172I", "An error was encountered with Application Response Measurement (ARM) component."}, + {200, "EDC5200I", "The application contains a Language Environment member language that cannot tolerate a fork()."}, + {201, "EDC5201I", "The Language Environment message file was not found in the hierarchical file system."}, + {202, "EDC5202E", "DLL facilities are not supported under SPC environment."}, + {203, "EDC5203E", "DLL facilities are not supported under POSIX environment."}, + {227, "EDC5227I", "Buffer is not long enough to contain a path definition"}, + {228, "EDC5228I", "The file referred to is an external link"}, + {229, "EDC5229I", "No path definition for ddname in effect"}, + {230, "EDC5230I", "ESM error."}, + {231, "EDC5231I", "CP or the external security manager had an error"}, + {232, "EDC5232I", "The function failed because it was invoked from a multithread environment."}, + {244, "EDC5244I", "The program, module or DLL is not supported in this environment."}, + {245, "EDC5245I", "Data is not valid."}, + {246, "EDC5246I", "Unknown system state."}, + {247, "EDC5247I", "Operation not supported."}, + {248, "EDC5248I", "The object name specified is not correct."}, + {249, "EDC5249I", "The function is not allowed."}, + {257, "EDC5257I", "Function cannot be called in the child process of a fork() from a multithreaded process until exec() is called."}, + {258, "EDC5258I", "A CUN_RS_NO_UNI_ENV error was issued by Unicode Services."}, + {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, + {260, "EDC5260I", 
"A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, + {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, + {1001, "EDC8001I", "An error was found in the IUCV header."}, + {1002, "EDC8002I", "A socket descriptor is out of range."}, + {1003, "EDC8003I", "A socket descriptor is in use."}, + {1004, "EDC8004I", "Request failed because of an IUCV error."}, + {1005, "EDC8005I", "Offload box error."}, + {1006, "EDC8006I", "Offload box restarted."}, + {1007, "EDC8007I", "Offload box down."}, + {1008, "EDC8008I", "Already a conflicting call outstanding on socket."}, + {1009, "EDC8009I", "Request cancelled using a SOCKcallCANCEL request."}, + {1011, "EDC8011I", "A name of a PFS was specified that either is not configured or is not a Sockets PFS."}, + {1100, "EDC8100I", "Block device required."}, + {1101, "EDC8101I", "Text file busy."}, + {1102, "EDC8102I", "Operation would block."}, + {1103, "EDC8103I", "Operation now in progress."}, + {1104, "EDC8104I", "Connection already in progress."}, + {1105, "EDC8105I", "Socket operation on non-socket."}, + {1106, "EDC8106I", "Destination address required."}, + {1107, "EDC8107I", "Message too long."}, + {1108, "EDC8108I", "Protocol wrong type for socket."}, + {1109, "EDC8109I", "Protocol not available."}, + {1110, "EDC8110I", "Protocol not supported."}, + {1111, "EDC8111I", "Socket type not supported."}, + {1112, "EDC8112I", "Operation not supported on socket."}, + {1113, "EDC8113I", "Protocol family not supported."}, + {1114, "EDC8114I", "Address family not supported."}, + {1115, "EDC8115I", "Address already in use."}, + {1116, "EDC8116I", "Address not available."}, + {1117, "EDC8117I", "Network is down."}, + {1118, "EDC8118I", "Network is unreachable."}, + {1119, "EDC8119I", "Network dropped connection on reset."}, + {1120, "EDC8120I", "Connection ended abnormally."}, + {1121, "EDC8121I", "Connection reset."}, + {1122, "EDC8122I", "No buffer space available."}, + {1123, "EDC8123I", "Socket already connected."}, + {1124, "EDC8124I", "Socket not connected."}, + {1125, "EDC8125I", "Can't send after socket shutdown."}, + {1126, "EDC8126I", "Too many references; can't splice."}, + {1127, "EDC8127I", "Connection timed out."}, + {1128, "EDC8128I", "Connection refused."}, + {1129, "EDC8129I", "Host is not available."}, + {1130, "EDC8130I", "Host cannot be reached."}, + {1131, "EDC8131I", "Too many processes."}, + {1132, "EDC8132I", "Too many users."}, + {1133, "EDC8133I", "Disk quota exceeded."}, + {1134, "EDC8134I", "Stale file handle."}, + {1135, "", ""}, + {1136, "EDC8136I", "File is not a STREAM."}, + {1137, "EDC8137I", "STREAMS ioctl() timeout."}, + {1138, "EDC8138I", "No STREAMS resources."}, + {1139, "EDC8139I", "The message identified by set_id and msg_id is not in the message catalog."}, + {1140, "EDC8140I", "Bad message."}, + {1141, "EDC8141I", "Identifier removed."}, + {1142, "", ""}, + {1143, "", ""}, + {1144, "EDC8144I", "The link has been severed."}, + {1145, "", ""}, + {1146, "", ""}, + {1147, "", ""}, + {1148, "EDC8148I", "Protocol error."}, + {1149, "EDC8149I", "Multihop not allowed."}, + {1150, "", ""}, + {1151, "", ""}, + {1152, "EDC8152I", "The asynchronous I/O request has been canceled."}, + {1159, "EDC8159I", "Function call was interrupted before any data was received."}, + {1160, "EDC8160I", "Socket reuse is not supported."}, + {1161, "EDC8161I", "The file system cannot currently be moved."}, 
+} + +// Signal table +var signalList = [...]struct { + num Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGABT", "aborted"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGPOLL", "pollable event"}, + {6, "SIGURG", "urgent I/O condition"}, + {7, "SIGSTOP", "stop process"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad argument to routine"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGABND", "abend"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGQUIT", "quit"}, + {25, "SIGTSTP", "stopped"}, + {26, "SIGTRAP", "trace/breakpoint trap"}, + {27, "SIGIOER", "I/O error"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGXCPU", "CPU time limit exceeded"}, + {30, "SIGXFSZ", "file size limit exceeded"}, + {31, "SIGVTALRM", "virtual timer expired"}, + {32, "SIGPROF", "profiling timer expired"}, + {33, "SIGDANGER", "danger"}, + {34, "SIGTHSTOP", "stop thread"}, + {35, "SIGTHCONT", "continue thread"}, + {37, "SIGTRACE", "trace"}, + {38, "", "DCE"}, + {39, "SIGDUMP", "dump"}, +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 89c5920e0c..bd001a6e1c 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 24b841eec5..c34d0639be 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. +//go:build linux && (mips || mips64) // +build linux // +build mips mips64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 47b0489565..3ccf0c0c4a 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. +//go:build linux && (mipsle || mips64le) // +build linux // +build mipsle mips64le diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index ea5d9cb536..7d65857004 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. 
+//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ed657ff1bc..91a23cc728 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -1,6 +1,7 @@ // go run mksyscall_aix_ppc.go -aix -tags aix,ppc syscall_aix.go syscall_aix_ppc.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build aix && ppc // +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 664b293b43..33c2609b8b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -1,6 +1,7 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build aix && ppc64 // +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 0550da06d1..8b737fa971 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -1,8 +1,8 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. -// +build aix,ppc64 -// +build gc +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index cde4dbc5f5..3c260917ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -1,8 +1,8 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. -// +build aix,ppc64 -// +build gccgo +//go:build aix && ppc64 && gccgo +// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go deleted file mode 100644 index c8c142c59a..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go +++ /dev/null @@ -1,39 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,386,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build darwin,386,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_closedir_trampoline() - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -func libc_readdir_r_trampoline() - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s deleted file mode 100644 index 00da1ebfca..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s +++ /dev/null @@ -1,12 +0,0 @@ -// go run mkasm_darwin.go 386 -// Code generated by the command above; DO NOT EDIT. - -// +build go1.13 - -#include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go deleted file mode 100644 index 3877183483..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ /dev/null @@ -1,2430 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,386,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build darwin,386,go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getgroups_trampoline() - -//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgroups_trampoline() - -//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_wait4_trampoline() - -//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_accept_trampoline() - -//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_bind_trampoline() - -//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_connect_trampoline() - -//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socket_trampoline() - -//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_getsockopt_trampoline() - -//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsockopt_trampoline() - -//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpeername_trampoline() - -//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsockname_trampoline() - -//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_shutdown_trampoline() - -//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socketpair_trampoline() - -//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvfrom_trampoline() - -//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_sendto_trampoline() - -//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvmsg_trampoline() - -//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sendmsg_trampoline() - -//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kevent_trampoline() - -//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_utimes_trampoline() - -//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_futimes_trampoline() - -//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_poll_trampoline() - -//go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_madvise_trampoline() - -//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } 
else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlock_trampoline() - -//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlockall_trampoline() - -//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mprotect_trampoline() - -//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_msync_trampoline() - -//go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlock_trampoline() - -//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlockall_trampoline() - -//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pipe_trampoline() - -//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getxattr_trampoline() - 
-//go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fgetxattr_trampoline() - -//go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setxattr_trampoline() - -//go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsetxattr_trampoline() - -//go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_removexattr_trampoline() - -//go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fremovexattr_trampoline() - -//go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listxattr_trampoline() - -//go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flistxattr_trampoline() - -//go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setattrlist_trampoline() - -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kill_trampoline() - -//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ioctl_trampoline() - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sysctl_trampoline() - -//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_sendfile_trampoline() - -//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_access_trampoline() - -//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_adjtime_trampoline() - -//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chdir_trampoline() - -//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chflags_trampoline() - -//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chmod_trampoline() - -//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chown_trampoline() - -//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chroot_trampoline() - -//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), 
uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clock_gettime_trampoline() - -//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_close_trampoline() - -//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefile(src string, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefile_trampoline() - -//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefileat_trampoline() - -//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup_trampoline() - -//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup2_trampoline() - -//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_exchangedata_trampoline() - -//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) - return -} - -func libc_exit_trampoline() - -//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_faccessat_trampoline() - -//go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchdir_trampoline() - -//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchflags_trampoline() - -//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmod_trampoline() - -//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmodat_trampoline() - -//go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchown_trampoline() - -//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchownat_trampoline() - -//go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_fclonefileat_trampoline() - -//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flock_trampoline() - -//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fpathconf_trampoline() - -//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsync_trampoline() - -//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ftruncate_trampoline() - -//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getcwd_trampoline() - -//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) - size = int(r0) - return -} - -func libc_getdtablesize_trampoline() - -//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) - egid = int(r0) - return -} - -func libc_getegid_trampoline() - -//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_geteuid_trampoline() - -//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) - gid = int(r0) - return -} - -func libc_getgid_trampoline() - -//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := 
syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpgid_trampoline() - -//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) - pgrp = int(r0) - return -} - -func libc_getpgrp_trampoline() - -//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) - pid = int(r0) - return -} - -func libc_getpid_trampoline() - -//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) - ppid = int(r0) - return -} - -func libc_getppid_trampoline() - -//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpriority_trampoline() - -//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrlimit_trampoline() - -//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrusage_trampoline() - -//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsid_trampoline() - -//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_getuid_trampoline() - -//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -func libc_issetugid_trampoline() - -//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kqueue_trampoline() - -//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lchown_trampoline() - -//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_link_trampoline() - -//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_linkat_trampoline() - -//go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listen_trampoline() - -//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdir_trampoline() - -//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdirat_trampoline() - -//go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkfifo_trampoline() - -//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mknod_trampoline() - -//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_open_trampoline() - -//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_openat_trampoline() - -//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pathconf_trampoline() - -//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pread_trampoline() - -//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } 
- r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pwrite_trampoline() - -//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_read_trampoline() - -//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlink_trampoline() - -//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlinkat_trampoline() - -//go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rename_trampoline() - -//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_renameat_trampoline() - -//go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err 
!= nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_revoke_trampoline() - -//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rmdir_trampoline() - -//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := syscall_syscall6(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lseek_trampoline() - -//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_select_trampoline() - -//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setegid_trampoline() - -//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_seteuid_trampoline() - -//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgid_trampoline() - -//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setlogin_trampoline() - -//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_setpgid_trampoline() - -//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setpriority_trampoline() - -//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setprivexec_trampoline() - -//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setregid_trampoline() - -//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setreuid_trampoline() - -//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setrlimit_trampoline() - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsid_trampoline() - -//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_settimeofday_trampoline() - -//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setuid_trampoline() - -//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlink_trampoline() - -//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlinkat_trampoline() - -//go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sync_trampoline() - -//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_truncate_trampoline() - -//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -func libc_umask_trampoline() - -//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_undelete_trampoline() - -//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlink_trampoline() - -//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlinkat_trampoline() - -//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) 
(err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unmount_trampoline() - -//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_write_trampoline() - -//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall9(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mmap_trampoline() - -//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munmap_trampoline() - -//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstat64_trampoline() - -//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatat64_trampoline() - -//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := 
syscall_syscall(funcPC(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatfs64_trampoline() - -//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getfsstat64_trampoline() - -//go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lstat64_trampoline() - -//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_stat64_trampoline() - -//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_statfs64_trampoline() - -//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s deleted file mode 100644 index 1c53979a10..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ /dev/null @@ -1,290 +0,0 @@ -// go run mkasm_darwin.go 386 -// Code generated by the command above; DO NOT EDIT. 
- -// +build go1.12 - -#include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 - JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 - JMP 
libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 - JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 - JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 - JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 - JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) -TEXT 
·libc_pread_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 - JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 - JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 - JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 - JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) -TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstat64(SB) -TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatat64(SB) -TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatfs64(SB) -TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getfsstat64(SB) -TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lstat64(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) -TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_stat64(SB) -TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_statfs64(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go index 8882623613..a06eb09324 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go // Code generated by the command 
above; see README.md. DO NOT EDIT. +//go:build darwin && amd64 && go1.13 // +build darwin,amd64,go1.13 package unix @@ -15,25 +16,25 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_closedir_trampoline() +var libc_closedir_trampoline_addr uintptr //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) res = Errno(r0) return } -func libc_readdir_r_trampoline() +var libc_readdir_r_trampoline_addr uintptr //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s index d671e8311f..d6c3e25c01 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s @@ -1,12 +1,25 @@ // go run mkasm_darwin.go amd64 // Code generated by the command above; DO NOT EDIT. +//go:build go1.13 // +build go1.13 #include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 508e5639bf..d4efe8d457 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build darwin && amd64 && go1.12 // +build darwin,amd64,go1.12 package unix @@ -15,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -23,28 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -func libc_getgroups_trampoline() +var libc_getgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgroups_trampoline() +var libc_setgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -52,14 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -func libc_wait4_trampoline() +var libc_wait4_trampoline_addr uintptr //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -67,42 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -func libc_accept_trampoline() +var libc_accept_trampoline_addr uintptr //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_bind_trampoline() +var libc_bind_trampoline_addr uintptr //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), 
uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_connect_trampoline() +var libc_connect_trampoline_addr uintptr //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -110,91 +111,91 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -func libc_socket_trampoline() +var libc_socket_trampoline_addr uintptr //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockopt_trampoline() +var libc_getsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setsockopt_trampoline() +var libc_setsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getpeername_trampoline() +var libc_getpeername_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockname_trampoline() +var libc_getsockname_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockname getsockname 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_shutdown_trampoline() +var libc_shutdown_trampoline_addr uintptr //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_socketpair_trampoline() +var libc_socketpair_trampoline_addr uintptr //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" @@ -207,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -215,7 +216,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -func libc_recvfrom_trampoline() +var libc_recvfrom_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" @@ -228,21 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendto_trampoline() +var libc_sendto_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -250,14 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_recvmsg_trampoline() +var libc_recvmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := 
syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -265,14 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_sendmsg_trampoline() +var libc_sendmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -280,7 +281,7 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -func libc_kevent_trampoline() +var libc_kevent_trampoline_addr uintptr //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" @@ -292,35 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_utimes_trampoline() +var libc_utimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_futimes_trampoline() +var libc_futimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -328,7 +329,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -func libc_poll_trampoline() +var libc_poll_trampoline_addr uintptr //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" @@ -341,14 +342,14 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_madvise_trampoline() +var libc_madvise_trampoline_addr uintptr //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" @@ -361,28 +362,28 @@ func Mlock(b []byte) (err error) { } 
else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlock_trampoline() +var libc_mlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlockall_trampoline() +var libc_mlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" @@ -395,14 +396,14 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mprotect_trampoline() +var libc_mprotect_trampoline_addr uintptr //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" @@ -415,14 +416,14 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_msync_trampoline() +var libc_msync_trampoline_addr uintptr //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" @@ -435,42 +436,42 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlock_trampoline() +var libc_munlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlockall_trampoline() +var libc_munlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_pipe_trampoline() +var libc_pipe_trampoline_addr uintptr //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" @@ -487,7 +488,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_getxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -495,7 +496,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o return } -func libc_getxattr_trampoline() +var libc_getxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" @@ -507,7 +508,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_fgetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -515,7 +516,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio return } -func libc_fgetxattr_trampoline() +var libc_fgetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" @@ -532,14 +533,14 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_setxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setxattr_trampoline() +var libc_setxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" @@ -551,14 +552,14 @@ func fsetxattr(fd int, attr string, data *byte, size int, position uint32, optio if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_fsetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsetxattr_trampoline() +var libc_fsetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" @@ -575,14 +576,14 @@ func removexattr(path string, attr string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_removexattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_removexattr_trampoline() +var libc_removexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" @@ -594,14 +595,14 @@ func fremovexattr(fd int, attr string, options int) (err 
error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_fremovexattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fremovexattr_trampoline() +var libc_fremovexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" @@ -613,7 +614,7 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_listxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -621,14 +622,14 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro return } -func libc_listxattr_trampoline() +var libc_listxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_flistxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -636,28 +637,28 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { return } -func libc_flistxattr_trampoline() +var libc_flistxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setattrlist_trampoline() +var libc_setattrlist_trampoline_addr uintptr //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -665,35 +666,35 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } -func libc_fcntl_trampoline() +var libc_fcntl_trampoline_addr uintptr //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), 
uintptr(signum), uintptr(posix)) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_kill_trampoline() +var libc_kill_trampoline_addr uintptr //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ioctl_trampoline() +var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -706,28 +707,28 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sysctl_trampoline() +var libc_sysctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) + _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendfile_trampoline() +var libc_sendfile_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" @@ -739,28 +740,28 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_access_trampoline() +var libc_access_trampoline_addr uintptr //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_adjtime_trampoline() +var libc_adjtime_trampoline_addr uintptr //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" @@ -772,14 +773,14 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := 
syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chdir_trampoline() +var libc_chdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" @@ -791,14 +792,14 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chflags_trampoline() +var libc_chflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" @@ -810,14 +811,14 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chmod_trampoline() +var libc_chmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" @@ -829,14 +830,14 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chown_trampoline() +var libc_chown_trampoline_addr uintptr //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" @@ -848,42 +849,42 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chroot_trampoline() +var libc_chroot_trampoline_addr uintptr //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clock_gettime_trampoline() +var libc_clock_gettime_trampoline_addr uintptr //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_close_trampoline() +var libc_close_trampoline_addr uintptr //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" @@ -900,14 +901,14 @@ func Clonefile(src string, dst string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + _, _, e1 := 
syscall_syscall(libc_clonefile_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefile_trampoline() +var libc_clonefile_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" @@ -924,21 +925,21 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_clonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefileat_trampoline() +var libc_clonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -946,21 +947,21 @@ func Dup(fd int) (nfd int, err error) { return } -func libc_dup_trampoline() +var libc_dup_trampoline_addr uintptr //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_dup2_trampoline() +var libc_dup2_trampoline_addr uintptr //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" @@ -977,25 +978,25 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_exchangedata_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_exchangedata_trampoline() +var libc_exchangedata_trampoline_addr uintptr //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } -func libc_exit_trampoline() +var libc_exit_trampoline_addr uintptr //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" @@ -1007,56 +1008,56 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_faccessat_trampoline() +var libc_faccessat_trampoline_addr uintptr //go:cgo_import_dynamic 
libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchdir_trampoline() +var libc_fchdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchflags_trampoline() +var libc_fchflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmod_trampoline() +var libc_fchmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" @@ -1068,28 +1069,28 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmodat_trampoline() +var libc_fchmodat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchown_trampoline() +var libc_fchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" @@ -1101,14 +1102,14 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchownat_trampoline() +var libc_fchownat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" @@ -1120,35 +1121,35 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + _, _, e1 := 
syscall_syscall6(libc_fclonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fclonefileat_trampoline() +var libc_fclonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_flock_trampoline() +var libc_flock_trampoline_addr uintptr //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1156,35 +1157,35 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -func libc_fpathconf_trampoline() +var libc_fpathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsync_trampoline() +var libc_fsync_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ftruncate_trampoline() +var libc_ftruncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" @@ -1197,7 +1198,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1205,62 +1206,62 @@ func Getcwd(buf []byte) (n int, err error) { return } -func libc_getcwd_trampoline() +var libc_getcwd_trampoline_addr uintptr //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) + r0, _, _ := syscall_syscall(libc_getdtablesize_trampoline_addr, 0, 0, 0) size = int(r0) return } -func libc_getdtablesize_trampoline() +var libc_getdtablesize_trampoline_addr uintptr //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) 
+ r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } -func libc_getegid_trampoline() +var libc_getegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_geteuid_trampoline() +var libc_geteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } -func libc_getgid_trampoline() +var libc_getgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1268,50 +1269,50 @@ func Getpgid(pid int) (pgid int, err error) { return } -func libc_getpgid_trampoline() +var libc_getpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } -func libc_getpgrp_trampoline() +var libc_getpgrp_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } -func libc_getpid_trampoline() +var libc_getpid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } -func libc_getppid_trampoline() +var libc_getppid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1319,42 +1320,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } -func libc_getpriority_trampoline() +var libc_getpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrlimit_trampoline() +var libc_getrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrusage_trampoline() +var libc_getrusage_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1362,52 +1363,52 @@ func Getsid(pid int) (sid int, err error) { return } -func libc_getsid_trampoline() +var libc_getsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_gettimeofday_trampoline() +var libc_gettimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_getuid_trampoline() +var libc_getuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } -func libc_issetugid_trampoline() +var libc_issetugid_trampoline_addr uintptr //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1415,7 +1416,7 @@ func Kqueue() (fd int, err error) { return } -func libc_kqueue_trampoline() +var libc_kqueue_trampoline_addr uintptr //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" @@ -1427,14 +1428,14 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, 
_, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lchown_trampoline() +var libc_lchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" @@ -1451,14 +1452,14 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_link_trampoline() +var libc_link_trampoline_addr uintptr //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" @@ -1475,28 +1476,28 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_linkat_trampoline() +var libc_linkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_listen_trampoline() +var libc_listen_trampoline_addr uintptr //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" @@ -1508,14 +1509,14 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdir_trampoline() +var libc_mkdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" @@ -1527,14 +1528,14 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdirat_trampoline() +var libc_mkdirat_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" @@ -1546,14 +1547,14 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkfifo_trampoline() +var 
libc_mkfifo_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" @@ -1565,14 +1566,14 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mknod_trampoline() +var libc_mknod_trampoline_addr uintptr //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" @@ -1584,7 +1585,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1592,7 +1593,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -func libc_open_trampoline() +var libc_open_trampoline_addr uintptr //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" @@ -1604,7 +1605,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1612,7 +1613,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -func libc_openat_trampoline() +var libc_openat_trampoline_addr uintptr //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" @@ -1624,7 +1625,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1632,7 +1633,7 @@ func Pathconf(path string, name int) (val int, err error) { return } -func libc_pathconf_trampoline() +var libc_pathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" @@ -1645,7 +1646,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1653,7 +1654,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pread_trampoline() +var libc_pread_trampoline_addr uintptr //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" @@ -1666,7 +1667,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := 
syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1674,7 +1675,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pwrite_trampoline() +var libc_pwrite_trampoline_addr uintptr //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" @@ -1687,7 +1688,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1695,7 +1696,7 @@ func read(fd int, p []byte) (n int, err error) { return } -func libc_read_trampoline() +var libc_read_trampoline_addr uintptr //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" @@ -1713,7 +1714,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,7 +1722,7 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -func libc_readlink_trampoline() +var libc_readlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" @@ -1739,7 +1740,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1747,7 +1748,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -func libc_readlinkat_trampoline() +var libc_readlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" @@ -1764,14 +1765,14 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rename_trampoline() +var libc_rename_trampoline_addr uintptr //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" @@ -1788,14 +1789,14 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_renameat_trampoline() +var libc_renameat_trampoline_addr uintptr //go:cgo_import_dynamic libc_renameat renameat 
"/usr/lib/libSystem.B.dylib" @@ -1807,14 +1808,14 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_revoke_trampoline() +var libc_revoke_trampoline_addr uintptr //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" @@ -1826,21 +1827,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rmdir_trampoline() +var libc_rmdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1848,14 +1849,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } -func libc_lseek_trampoline() +var libc_lseek_trampoline_addr uintptr //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1863,49 +1864,49 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -func libc_select_trampoline() +var libc_select_trampoline_addr uintptr //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) + _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setegid_trampoline() +var libc_setegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_seteuid_trampoline() +var libc_seteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), 
uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgid_trampoline() +var libc_setgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" @@ -1917,105 +1918,105 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setlogin_trampoline() +var libc_setlogin_trampoline_addr uintptr //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpgid_trampoline() +var libc_setpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpriority_trampoline() +var libc_setpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) + _, _, e1 := syscall_syscall(libc_setprivexec_trampoline_addr, uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setprivexec_trampoline() +var libc_setprivexec_trampoline_addr uintptr //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setregid_trampoline() +var libc_setregid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setreuid_trampoline() +var libc_setreuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + 
_, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setrlimit_trampoline() +var libc_setrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2023,35 +2024,35 @@ func Setsid() (pid int, err error) { return } -func libc_setsid_trampoline() +var libc_setsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_settimeofday_trampoline() +var libc_settimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setuid_trampoline() +var libc_setuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" @@ -2068,14 +2069,14 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlink_trampoline() +var libc_symlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" @@ -2092,28 +2093,28 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlinkat_trampoline() +var libc_symlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sync_trampoline() +var libc_sync_trampoline_addr uintptr //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" @@ -2125,26 +2126,26 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_truncate_trampoline() +var libc_truncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } -func libc_umask_trampoline() +var libc_umask_trampoline_addr uintptr //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" @@ -2156,14 +2157,14 @@ func Undelete(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_undelete_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_undelete_trampoline() +var libc_undelete_trampoline_addr uintptr //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" @@ -2175,14 +2176,14 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlink_trampoline() +var libc_unlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" @@ -2194,14 +2195,14 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlinkat_trampoline() +var libc_unlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" @@ -2213,14 +2214,14 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unmount_trampoline() +var libc_unmount_trampoline_addr uintptr //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" @@ -2233,7 +2234,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2241,14 +2242,14 @@ func write(fd int, p []byte) (n int, err error) { return } -func libc_write_trampoline() +var libc_write_trampoline_addr uintptr //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret 
uintptr, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2256,28 +2257,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -func libc_mmap_trampoline() +var libc_mmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munmap_trampoline() +var libc_munmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2288,7 +2289,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2299,14 +2300,14 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstat64_trampoline() +var libc_fstat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" @@ -2318,35 +2319,35 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatat64_trampoline() +var libc_fstatat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := 
syscall_syscall(libc_fstatfs64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatfs64_trampoline() +var libc_fstatfs64_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_getfsstat64_trampoline_addr, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2354,7 +2355,7 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { return } -func libc_getfsstat64_trampoline() +var libc_getfsstat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib" @@ -2366,28 +2367,28 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat64_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lstat64_trampoline() +var libc_lstat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ptrace_trampoline() +var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" @@ -2399,14 +2400,14 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat64_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_stat64_trampoline() +var libc_stat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" @@ -2418,13 +2419,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs64_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_statfs64_trampoline() +var libc_statfs64_trampoline_addr uintptr //go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index c77bd6e20b..bc169c2ab9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,290 +1,859 @@ // go run mkasm_darwin.go amd64 // Code generated by the 
command above; DO NOT EDIT. +//go:build go1.12 // +build go1.12 #include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT 
·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) + +TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) + +TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) + +TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) + +TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) + +TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) + +TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) + +TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) + +TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) + +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) + +TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, 
$libc_dup2_trampoline<>(SB) + +TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, 
$libc_ftruncate_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT 
·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_umask_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 +DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) -TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) + +TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) -TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) + +TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) + +TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) -TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) + +TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) + +TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) -TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) + +TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) -TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat64_trampoline_addr(SB)/8, 
$libc_stat64_trampoline<>(SB) + +TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) + +GLOBL ·libc_statfs64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go deleted file mode 100644 index de4738fff8..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go +++ /dev/null @@ -1,39 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,arm,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,arm,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_closedir_trampoline() - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -func libc_readdir_r_trampoline() - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s deleted file mode 100644 index 488e55707a..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s +++ /dev/null @@ -1,12 +0,0 @@ -// go run mkasm_darwin.go arm -// Code generated by the command above; DO NOT EDIT. - -// +build go1.13 - -#include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go deleted file mode 100644 index c0c771f402..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ /dev/null @@ -1,2416 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,arm,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build darwin,arm,go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getgroups_trampoline() - -//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgroups_trampoline() - -//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_wait4_trampoline() - -//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_accept_trampoline() - -//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_bind_trampoline() - -//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_connect_trampoline() - -//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socket_trampoline() - -//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_getsockopt_trampoline() - -//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsockopt_trampoline() - -//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpeername_trampoline() - -//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsockname_trampoline() - -//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_shutdown_trampoline() - -//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socketpair_trampoline() - -//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvfrom_trampoline() - -//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_sendto_trampoline() - -//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvmsg_trampoline() - -//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sendmsg_trampoline() - -//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kevent_trampoline() - -//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_utimes_trampoline() - -//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_futimes_trampoline() - -//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_poll_trampoline() - -//go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_madvise_trampoline() - -//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } 
else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlock_trampoline() - -//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlockall_trampoline() - -//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mprotect_trampoline() - -//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_msync_trampoline() - -//go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlock_trampoline() - -//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlockall_trampoline() - -//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pipe_trampoline() - -//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getxattr_trampoline() - 
-//go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fgetxattr_trampoline() - -//go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setxattr_trampoline() - -//go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsetxattr_trampoline() - -//go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_removexattr_trampoline() - -//go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fremovexattr_trampoline() - -//go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listxattr_trampoline() - -//go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flistxattr_trampoline() - -//go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setattrlist_trampoline() - -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kill_trampoline() - -//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ioctl_trampoline() - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sysctl_trampoline() - -//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_sendfile_trampoline() - -//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_access_trampoline() - -//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_adjtime_trampoline() - -//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chdir_trampoline() - -//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chflags_trampoline() - -//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chmod_trampoline() - -//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chown_trampoline() - -//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chroot_trampoline() - -//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), 
uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clock_gettime_trampoline() - -//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_close_trampoline() - -//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefile(src string, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefile_trampoline() - -//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefileat_trampoline() - -//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup_trampoline() - -//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup2_trampoline() - -//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_exchangedata_trampoline() - -//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) - return -} - -func libc_exit_trampoline() - -//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_faccessat_trampoline() - -//go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchdir_trampoline() - -//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchflags_trampoline() - -//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmod_trampoline() - -//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmodat_trampoline() - -//go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchown_trampoline() - -//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchownat_trampoline() - -//go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_fclonefileat_trampoline() - -//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flock_trampoline() - -//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fpathconf_trampoline() - -//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsync_trampoline() - -//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ftruncate_trampoline() - -//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getcwd_trampoline() - -//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) - size = int(r0) - return -} - -func libc_getdtablesize_trampoline() - -//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) - egid = int(r0) - return -} - -func libc_getegid_trampoline() - -//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_geteuid_trampoline() - -//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) - gid = int(r0) - return -} - -func libc_getgid_trampoline() - -//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := 
syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpgid_trampoline() - -//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) - pgrp = int(r0) - return -} - -func libc_getpgrp_trampoline() - -//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) - pid = int(r0) - return -} - -func libc_getpid_trampoline() - -//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) - ppid = int(r0) - return -} - -func libc_getppid_trampoline() - -//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpriority_trampoline() - -//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrlimit_trampoline() - -//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrusage_trampoline() - -//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsid_trampoline() - -//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_getuid_trampoline() - -//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -func libc_issetugid_trampoline() - -//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kqueue_trampoline() - -//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lchown_trampoline() - -//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_link_trampoline() - -//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_linkat_trampoline() - -//go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listen_trampoline() - -//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdir_trampoline() - -//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdirat_trampoline() - -//go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkfifo_trampoline() - -//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mknod_trampoline() - -//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_open_trampoline() - -//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_openat_trampoline() - -//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pathconf_trampoline() - -//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pread_trampoline() - -//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } 
- r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pwrite_trampoline() - -//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_read_trampoline() - -//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlink_trampoline() - -//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlinkat_trampoline() - -//go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rename_trampoline() - -//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_renameat_trampoline() - -//go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err 
!= nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_revoke_trampoline() - -//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rmdir_trampoline() - -//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := syscall_syscall6(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lseek_trampoline() - -//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_select_trampoline() - -//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setegid_trampoline() - -//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_seteuid_trampoline() - -//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgid_trampoline() - -//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setlogin_trampoline() - -//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func 
libc_setpgid_trampoline() - -//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setpriority_trampoline() - -//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setprivexec_trampoline() - -//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setregid_trampoline() - -//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setreuid_trampoline() - -//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setrlimit_trampoline() - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsid_trampoline() - -//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_settimeofday_trampoline() - -//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setuid_trampoline() - -//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlink_trampoline() - -//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlinkat_trampoline() - -//go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sync_trampoline() - -//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_truncate_trampoline() - -//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -func libc_umask_trampoline() - -//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_undelete_trampoline() - -//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlink_trampoline() - -//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlinkat_trampoline() - -//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) 
(err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unmount_trampoline() - -//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_write_trampoline() - -//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall9(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mmap_trampoline() - -//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munmap_trampoline() - -//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstat_trampoline() - -//go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatat_trampoline() - -//go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := 
syscall_syscall(funcPC(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatfs_trampoline() - -//go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getfsstat_trampoline() - -//go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lstat_trampoline() - -//go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_stat_trampoline() - -//go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_statfs_trampoline() - -//go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s deleted file mode 100644 index 5eec5f1d95..0000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ /dev/null @@ -1,288 +0,0 @@ -// go run mkasm_darwin.go arm -// Code generated by the command above; DO NOT EDIT. 
- -// +build go1.12 - -#include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 - JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 - JMP 
libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 - JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 - JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 - JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 - JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) -TEXT 
·libc_pread_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 - JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 - JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 - JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 - JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstat(SB) -TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatfs(SB) -TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lstat(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 - JMP libc_statfs(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go index 870eb37abf..cec595d553 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build darwin && arm64 && go1.13 // +build darwin,arm64,go1.13 package unix @@ -15,25 +16,25 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_closedir_trampoline() +var libc_closedir_trampoline_addr uintptr //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) res = Errno(r0) return } -func libc_readdir_r_trampoline() +var libc_readdir_r_trampoline_addr uintptr //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s index b29dabb0f0..357989722c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s @@ -1,12 +1,25 @@ // go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. +//go:build go1.13 // +build go1.13 #include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 9b01a79c41..f2ee2bd33b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build darwin && arm64 && go1.12 // +build darwin,arm64,go1.12 package unix @@ -15,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -23,28 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -func libc_getgroups_trampoline() +var libc_getgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgroups_trampoline() +var libc_setgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -52,14 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -func libc_wait4_trampoline() +var libc_wait4_trampoline_addr uintptr //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -67,42 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -func libc_accept_trampoline() +var libc_accept_trampoline_addr uintptr //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_bind_trampoline() +var libc_bind_trampoline_addr uintptr //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), 
uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_connect_trampoline() +var libc_connect_trampoline_addr uintptr //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -110,91 +111,91 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -func libc_socket_trampoline() +var libc_socket_trampoline_addr uintptr //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockopt_trampoline() +var libc_getsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setsockopt_trampoline() +var libc_setsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getpeername_trampoline() +var libc_getpeername_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockname_trampoline() +var libc_getsockname_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockname getsockname 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_shutdown_trampoline() +var libc_shutdown_trampoline_addr uintptr //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_socketpair_trampoline() +var libc_socketpair_trampoline_addr uintptr //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" @@ -207,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -215,7 +216,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -func libc_recvfrom_trampoline() +var libc_recvfrom_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" @@ -228,21 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendto_trampoline() +var libc_sendto_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -250,14 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_recvmsg_trampoline() +var libc_recvmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := 
syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -265,14 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_sendmsg_trampoline() +var libc_sendmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -280,7 +281,7 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -func libc_kevent_trampoline() +var libc_kevent_trampoline_addr uintptr //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" @@ -292,35 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_utimes_trampoline() +var libc_utimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_futimes_trampoline() +var libc_futimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -328,7 +329,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -func libc_poll_trampoline() +var libc_poll_trampoline_addr uintptr //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" @@ -341,14 +342,14 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_madvise_trampoline() +var libc_madvise_trampoline_addr uintptr //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" @@ -361,28 +362,28 @@ func Mlock(b []byte) (err error) { } 
else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlock_trampoline() +var libc_mlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlockall_trampoline() +var libc_mlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" @@ -395,14 +396,14 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mprotect_trampoline() +var libc_mprotect_trampoline_addr uintptr //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" @@ -415,14 +416,14 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_msync_trampoline() +var libc_msync_trampoline_addr uintptr //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" @@ -435,42 +436,42 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlock_trampoline() +var libc_munlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlockall_trampoline() +var libc_munlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_pipe_trampoline() +var libc_pipe_trampoline_addr uintptr //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" @@ -487,7 +488,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_getxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -495,7 +496,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o return } -func libc_getxattr_trampoline() +var libc_getxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" @@ -507,7 +508,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_fgetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -515,7 +516,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio return } -func libc_fgetxattr_trampoline() +var libc_fgetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" @@ -532,14 +533,14 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_setxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setxattr_trampoline() +var libc_setxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" @@ -551,14 +552,14 @@ func fsetxattr(fd int, attr string, data *byte, size int, position uint32, optio if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_fsetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsetxattr_trampoline() +var libc_fsetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" @@ -575,14 +576,14 @@ func removexattr(path string, attr string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_removexattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_removexattr_trampoline() +var libc_removexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" @@ -594,14 +595,14 @@ func fremovexattr(fd int, attr string, options int) (err 
error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_fremovexattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fremovexattr_trampoline() +var libc_fremovexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" @@ -613,7 +614,7 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_listxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -621,14 +622,14 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro return } -func libc_listxattr_trampoline() +var libc_listxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_flistxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -636,28 +637,28 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { return } -func libc_flistxattr_trampoline() +var libc_flistxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setattrlist_trampoline() +var libc_setattrlist_trampoline_addr uintptr //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -665,35 +666,35 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } -func libc_fcntl_trampoline() +var libc_fcntl_trampoline_addr uintptr //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), 
uintptr(signum), uintptr(posix)) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_kill_trampoline() +var libc_kill_trampoline_addr uintptr //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ioctl_trampoline() +var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -706,28 +707,28 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sysctl_trampoline() +var libc_sysctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) + _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendfile_trampoline() +var libc_sendfile_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" @@ -739,28 +740,28 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_access_trampoline() +var libc_access_trampoline_addr uintptr //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_adjtime_trampoline() +var libc_adjtime_trampoline_addr uintptr //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" @@ -772,14 +773,14 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := 
syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chdir_trampoline() +var libc_chdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" @@ -791,14 +792,14 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chflags_trampoline() +var libc_chflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" @@ -810,14 +811,14 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chmod_trampoline() +var libc_chmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" @@ -829,14 +830,14 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chown_trampoline() +var libc_chown_trampoline_addr uintptr //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" @@ -848,42 +849,42 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chroot_trampoline() +var libc_chroot_trampoline_addr uintptr //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clock_gettime_trampoline() +var libc_clock_gettime_trampoline_addr uintptr //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_close_trampoline() +var libc_close_trampoline_addr uintptr //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" @@ -900,14 +901,14 @@ func Clonefile(src string, dst string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + _, _, e1 := 
syscall_syscall(libc_clonefile_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefile_trampoline() +var libc_clonefile_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" @@ -924,21 +925,21 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_clonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefileat_trampoline() +var libc_clonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -946,21 +947,21 @@ func Dup(fd int) (nfd int, err error) { return } -func libc_dup_trampoline() +var libc_dup_trampoline_addr uintptr //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_dup2_trampoline() +var libc_dup2_trampoline_addr uintptr //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" @@ -977,25 +978,25 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_exchangedata_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_exchangedata_trampoline() +var libc_exchangedata_trampoline_addr uintptr //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } -func libc_exit_trampoline() +var libc_exit_trampoline_addr uintptr //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" @@ -1007,56 +1008,56 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_faccessat_trampoline() +var libc_faccessat_trampoline_addr uintptr //go:cgo_import_dynamic 
libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchdir_trampoline() +var libc_fchdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchflags_trampoline() +var libc_fchflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmod_trampoline() +var libc_fchmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" @@ -1068,28 +1069,28 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmodat_trampoline() +var libc_fchmodat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchown_trampoline() +var libc_fchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" @@ -1101,14 +1102,14 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchownat_trampoline() +var libc_fchownat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" @@ -1120,35 +1121,35 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + _, _, e1 := 
syscall_syscall6(libc_fclonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fclonefileat_trampoline() +var libc_fclonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_flock_trampoline() +var libc_flock_trampoline_addr uintptr //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1156,35 +1157,35 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -func libc_fpathconf_trampoline() +var libc_fpathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsync_trampoline() +var libc_fsync_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ftruncate_trampoline() +var libc_ftruncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" @@ -1197,7 +1198,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1205,62 +1206,62 @@ func Getcwd(buf []byte) (n int, err error) { return } -func libc_getcwd_trampoline() +var libc_getcwd_trampoline_addr uintptr //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) + r0, _, _ := syscall_syscall(libc_getdtablesize_trampoline_addr, 0, 0, 0) size = int(r0) return } -func libc_getdtablesize_trampoline() +var libc_getdtablesize_trampoline_addr uintptr //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) 
+ r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } -func libc_getegid_trampoline() +var libc_getegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_geteuid_trampoline() +var libc_geteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } -func libc_getgid_trampoline() +var libc_getgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1268,50 +1269,50 @@ func Getpgid(pid int) (pgid int, err error) { return } -func libc_getpgid_trampoline() +var libc_getpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } -func libc_getpgrp_trampoline() +var libc_getpgrp_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } -func libc_getpid_trampoline() +var libc_getpid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } -func libc_getppid_trampoline() +var libc_getppid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1319,42 +1320,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } -func libc_getpriority_trampoline() +var libc_getpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrlimit_trampoline() +var libc_getrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrusage_trampoline() +var libc_getrusage_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1362,52 +1363,52 @@ func Getsid(pid int) (sid int, err error) { return } -func libc_getsid_trampoline() +var libc_getsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_gettimeofday_trampoline() +var libc_gettimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_getuid_trampoline() +var libc_getuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } -func libc_issetugid_trampoline() +var libc_issetugid_trampoline_addr uintptr //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1415,7 +1416,7 @@ func Kqueue() (fd int, err error) { return } -func libc_kqueue_trampoline() +var libc_kqueue_trampoline_addr uintptr //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" @@ -1427,14 +1428,14 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, 
_, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lchown_trampoline() +var libc_lchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" @@ -1451,14 +1452,14 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_link_trampoline() +var libc_link_trampoline_addr uintptr //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" @@ -1475,28 +1476,28 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_linkat_trampoline() +var libc_linkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_listen_trampoline() +var libc_listen_trampoline_addr uintptr //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" @@ -1508,14 +1509,14 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdir_trampoline() +var libc_mkdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" @@ -1527,14 +1528,14 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdirat_trampoline() +var libc_mkdirat_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" @@ -1546,14 +1547,14 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkfifo_trampoline() +var 
libc_mkfifo_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" @@ -1565,14 +1566,14 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mknod_trampoline() +var libc_mknod_trampoline_addr uintptr //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" @@ -1584,7 +1585,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1592,7 +1593,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -func libc_open_trampoline() +var libc_open_trampoline_addr uintptr //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" @@ -1604,7 +1605,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1612,7 +1613,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -func libc_openat_trampoline() +var libc_openat_trampoline_addr uintptr //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" @@ -1624,7 +1625,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1632,7 +1633,7 @@ func Pathconf(path string, name int) (val int, err error) { return } -func libc_pathconf_trampoline() +var libc_pathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" @@ -1645,7 +1646,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1653,7 +1654,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pread_trampoline() +var libc_pread_trampoline_addr uintptr //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" @@ -1666,7 +1667,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := 
syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1674,7 +1675,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pwrite_trampoline() +var libc_pwrite_trampoline_addr uintptr //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" @@ -1687,7 +1688,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1695,7 +1696,7 @@ func read(fd int, p []byte) (n int, err error) { return } -func libc_read_trampoline() +var libc_read_trampoline_addr uintptr //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" @@ -1713,7 +1714,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,7 +1722,7 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -func libc_readlink_trampoline() +var libc_readlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" @@ -1739,7 +1740,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1747,7 +1748,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -func libc_readlinkat_trampoline() +var libc_readlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" @@ -1764,14 +1765,14 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rename_trampoline() +var libc_rename_trampoline_addr uintptr //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" @@ -1788,14 +1789,14 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_renameat_trampoline() +var libc_renameat_trampoline_addr uintptr //go:cgo_import_dynamic libc_renameat renameat 
"/usr/lib/libSystem.B.dylib" @@ -1807,14 +1808,14 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_revoke_trampoline() +var libc_revoke_trampoline_addr uintptr //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" @@ -1826,21 +1827,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rmdir_trampoline() +var libc_rmdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1848,14 +1849,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } -func libc_lseek_trampoline() +var libc_lseek_trampoline_addr uintptr //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1863,49 +1864,49 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -func libc_select_trampoline() +var libc_select_trampoline_addr uintptr //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) + _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setegid_trampoline() +var libc_setegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_seteuid_trampoline() +var libc_seteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), 
uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgid_trampoline() +var libc_setgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" @@ -1917,105 +1918,105 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setlogin_trampoline() +var libc_setlogin_trampoline_addr uintptr //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpgid_trampoline() +var libc_setpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpriority_trampoline() +var libc_setpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) + _, _, e1 := syscall_syscall(libc_setprivexec_trampoline_addr, uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setprivexec_trampoline() +var libc_setprivexec_trampoline_addr uintptr //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setregid_trampoline() +var libc_setregid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setreuid_trampoline() +var libc_setreuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + 
_, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setrlimit_trampoline() +var libc_setrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2023,35 +2024,35 @@ func Setsid() (pid int, err error) { return } -func libc_setsid_trampoline() +var libc_setsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_settimeofday_trampoline() +var libc_settimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setuid_trampoline() +var libc_setuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" @@ -2068,14 +2069,14 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlink_trampoline() +var libc_symlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" @@ -2092,28 +2093,28 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlinkat_trampoline() +var libc_symlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sync_trampoline() +var libc_sync_trampoline_addr uintptr //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" @@ -2125,26 +2126,26 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_truncate_trampoline() +var libc_truncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } -func libc_umask_trampoline() +var libc_umask_trampoline_addr uintptr //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" @@ -2156,14 +2157,14 @@ func Undelete(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_undelete_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_undelete_trampoline() +var libc_undelete_trampoline_addr uintptr //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" @@ -2175,14 +2176,14 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlink_trampoline() +var libc_unlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" @@ -2194,14 +2195,14 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlinkat_trampoline() +var libc_unlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" @@ -2213,14 +2214,14 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unmount_trampoline() +var libc_unmount_trampoline_addr uintptr //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" @@ -2233,7 +2234,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2241,14 +2242,14 @@ func write(fd int, p []byte) (n int, err error) { return } -func libc_write_trampoline() +var libc_write_trampoline_addr uintptr //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret 
uintptr, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2256,28 +2257,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -func libc_mmap_trampoline() +var libc_mmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munmap_trampoline() +var libc_munmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2288,7 +2289,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2299,14 +2300,14 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstat_trampoline() +var libc_fstat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" @@ -2318,35 +2319,35 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatat_trampoline() +var libc_fstatat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), 
uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatfs_trampoline() +var libc_fstatfs_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2354,7 +2355,7 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { return } -func libc_getfsstat_trampoline() +var libc_getfsstat_trampoline_addr uintptr //go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib" @@ -2366,28 +2367,28 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lstat_trampoline() +var libc_lstat_trampoline_addr uintptr //go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ptrace_trampoline() +var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" @@ -2399,14 +2400,14 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_stat_trampoline() +var libc_stat_trampoline_addr uintptr //go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" @@ -2418,13 +2419,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_statfs_trampoline() +var libc_statfs_trampoline_addr uintptr //go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 53c402bf68..33e19776db 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,290 +1,859 @@ // go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. 
+//go:build go1.12 // +build go1.12 #include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 
+ +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, 
$libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) + +TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) + +TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) + +TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) + +TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) + +TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) + +TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) + +TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) + +TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) + +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) + +TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT 
libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT 
libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, 
$libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + 
+GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, 
$libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 +DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) -TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) -TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) + 
+GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1aaccd3615..1b6eedfa61 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build dragonfly && amd64 // +build dragonfly,amd64 package unix @@ -362,8 +363,10 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func pipe2(p *[2]_C_int, flags int) (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + r = int(r0) + w = int(r1) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 600f1d26d2..3e9bddb7b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && 386 // +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 064934b0d1..c72a462b91 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && amd64 // +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 31d2c46165..530d5df90c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && arm // +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 4adaaa5618..71e7df9e85 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags freebsd,arm64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build freebsd && arm64 // +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 665dd9e4b4..af5cb064ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build illumos && amd64 // +build illumos,amd64 package unix @@ -14,19 +15,25 @@ import ( //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" +//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" +//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev //go:linkname procaccept4 libc_accept4 +//go:linkname procputmsg libc_putmsg +//go:linkname procgetmsg libc_getmsg var ( procreadv, procpreadv, procwritev, procpwritev, - procaccept4 syscallFunc + procaccept4, + procputmsg, + procgetmsg syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -99,3 +106,23 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 2fbbbe5a89..7305cc915b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -531,6 +532,16 @@ func Close(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func CloseRange(first uint, last uint, flags uint) (err error) { + _, _, e1 := Syscall(SYS_CLOSE_RANGE, uintptr(first), uintptr(last), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 19ebd3ff75..e37096e4de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && 386 // +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5c562182a1..9919d8486d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && amd64 // +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index dc69d99c61..076754d48d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && arm // +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 1b897dee05..e893f987f9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && arm64 // +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 49186843ae..4703cf3c33 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,6 +1,7 @@ // go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build linux && mips // +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 9171d3bd2a..a134f9a4d2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips64 // +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 82286f04f9..b1fff2d946 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips64le // +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 15920621c4..d13d6da01e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mipsle // +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go new file mode 100644 index 0000000000..927cf1a00f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -0,0 +1,762 @@ +// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build linux && ppc +// +build linux,ppc + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r0)<<32 | int64(r1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE64, uintptr(fd), uintptr(length>>32), uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid 
int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset>>32), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset>>32), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), uintptr(length>>32), uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 73a42e2ccb..da8ec03966 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && ppc64 // +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 6b85595366..083f493bb6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build linux && ppc64le // +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index b76133447e..63b393b802 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && riscv64 // +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index d7032ab1e4..bb347407d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && s390x // +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index bcbbdd906e..8edc517e1e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && sparc64 // +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 3bbd9e39cd..4726ab30a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build netbsd && 386 // +build netbsd,386 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d8cf5012c2..fe71456dbc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && amd64 // +build netbsd,amd64 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 1153fe69b8..0b5b2f0143 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build netbsd && arm // +build netbsd,arm package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 24b4ebb41f..bfca28648f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -netbsd -tags netbsd,arm64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build netbsd && arm64 // +build netbsd,arm64 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index b44b31aeb1..8f80f4ade5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build openbsd && 386 // +build openbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 67f93ee76d..3a47aca7bf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && amd64 // +build openbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index d7c878b1d0..883a9b45e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && arm // +build openbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 8facd695d5..aac7fdc95e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && arm64 // +build openbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index ec6bd5bb73..8776187462 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && mips64 // +build openbsd,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 6dbb83716c..4e18d5c99f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall_solaris.go -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build solaris && amd64 // +build solaris,amd64 package unix @@ -115,6 +116,7 @@ import ( //go:cgo_import_dynamic libc_statvfs statvfs "libc.so" //go:cgo_import_dynamic libc_symlink symlink "libc.so" //go:cgo_import_dynamic libc_sync sync "libc.so" +//go:cgo_import_dynamic libc_sysconf sysconf "libc.so" //go:cgo_import_dynamic libc_times times "libc.so" //go:cgo_import_dynamic libc_truncate truncate "libc.so" //go:cgo_import_dynamic libc_fsync fsync "libc.so" @@ -245,6 +247,7 @@ import ( //go:linkname procStatvfs libc_statvfs //go:linkname procSymlink libc_symlink //go:linkname procSync libc_sync +//go:linkname procSysconf libc_sysconf //go:linkname procTimes libc_times //go:linkname procTruncate libc_truncate //go:linkname procFsync libc_fsync @@ -376,6 +379,7 @@ var ( procStatvfs, procSymlink, procSync, + procSysconf, procTimes, procTruncate, procFsync, @@ -615,8 +619,9 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) +func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) if e1 != 0 { err = e1 } @@ -1687,6 +1692,17 @@ func Sync() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Sysconf(which int) (n int64, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) + n = int64(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go new file mode 100644 index 0000000000..f2079457c6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -0,0 +1,1255 @@ +// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), 
uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_MUNMAP, 
uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Creat(path string, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Errno2() (er2 int) { + uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) + er2 = int(uer2) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Err2ad() (eadd *int) { + ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) + eadd = (*int)(unsafe.Pointer(ueadd)) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 
0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + retval = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpagesize() (pgsize int) { + r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) + pgsize = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) + pid = int(r0) + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig Signal) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKFIFO_A, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, lim 
*Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(uid int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, statLE *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + syscall_syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tcgetattr(fildes int, termptr *Termios) (err error) { + _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { + _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, utim *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(SYS___OPEN_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func remove(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { + r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tv *timeval_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 102f1ab475..9e9d0b2a9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. 
+//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 4866fced8a..adecd09667 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index d3801eb24b..8ea52a4a18 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index ba4304fd23..154b57ae3e 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index aca34b3493..d96bb2ba4d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go deleted file mode 100644 index ad62324c7c..0000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go +++ /dev/null @@ -1,437 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build 386,darwin - -package unix - -// Deprecated: Use libSystem wrappers instead of direct syscalls. 
-const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 - SYS_SEMGET = 255 - SYS_SEMOP = 256 - 
SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 
- SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 - SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index a2fc91d6a8..f8298ff9b5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go deleted file mode 100644 index 20d7808ace..0000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go +++ /dev/null @@ -1,437 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build arm,darwin - -package unix - -// Deprecated: Use libSystem wrappers instead of direct syscalls. -const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - 
SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 - SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 
387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 - SYS_NTP_GETTIME = 528 - 
SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 527b9588cc..5eb433bbf0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 9912c6ee3d..703675c0c4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 9474974b65..59d5dfc209 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 48a7beae7b..342d471d2e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && freebsd // +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 4a6dfd4a74..e2e3d72c5b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 3e51af8edd..61ad5ca3c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && freebsd // +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index f6742bdee0..fbc59b7fdd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux package unix @@ -436,4 +437,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f7e525573b..04d16d771e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux package unix @@ -358,4 +359,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3f60977da6..3b1c105137 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux package unix @@ -400,4 +401,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index dbedf4cbac..3198adcf77 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux package unix @@ -303,4 +304,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index eeff7e1dc9..c877ec6e68 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips && linux // +build mips,linux package unix @@ -421,4 +422,6 @@ const ( SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 SYS_PROCESS_MADVISE = 4440 + SYS_EPOLL_PWAIT2 = 4441 + SYS_MOUNT_SETATTR = 4442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 73cfa535cd..b5f2903729 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux package unix @@ -351,4 +352,6 @@ const ( SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index be74729e0c..46077689ab 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux package unix @@ -351,4 +352,6 @@ const ( SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 2a1047c818..80e6696b39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux package unix @@ -421,4 +422,6 @@ const ( SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 SYS_PROCESS_MADVISE = 4440 + SYS_EPOLL_PWAIT2 = 4441 + SYS_MOUNT_SETATTR = 4442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go new file mode 100644 index 0000000000..b9d697ffb1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -0,0 +1,434 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build ppc && linux +// +build ppc,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + 
SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_FCNTL64 = 204 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_SENDFILE64 = 226 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_FADVISE64_64 = 254 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_FSTATAT64 = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 
332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_CLOCK_GETTIME64 = 403 + SYS_CLOCK_SETTIME64 = 404 + SYS_CLOCK_ADJTIME64 = 405 + SYS_CLOCK_GETRES_TIME64 = 406 + SYS_CLOCK_NANOSLEEP_TIME64 = 407 + SYS_TIMER_GETTIME64 = 408 + SYS_TIMER_SETTIME64 = 409 + SYS_TIMERFD_GETTIME64 = 410 + SYS_TIMERFD_SETTIME64 = 411 + SYS_UTIMENSAT_TIME64 = 412 + SYS_PSELECT6_TIME64 = 413 + SYS_PPOLL_TIME64 = 414 + SYS_IO_PGETEVENTS_TIME64 = 416 + SYS_RECVMMSG_TIME64 = 417 + SYS_MQ_TIMEDSEND_TIME64 = 418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 419 + SYS_SEMTIMEDOP_TIME64 = 420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 421 + SYS_FUTEX_TIME64 = 422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 32707428ce..08edc54d35 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux package unix @@ -400,4 +401,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index a58572f781..33b33b0834 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build ppc64le && linux // +build ppc64le,linux package unix @@ -400,4 +401,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 72a65b7602..66c8a8e09e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux package unix @@ -302,4 +303,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 1fb9ae5d49..aea5760cea 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux package unix @@ -365,4 +366,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 57636e09e4..488ca848d1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux package unix @@ -379,4 +380,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index e66a8c9d39..3a6699eba9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 42c788f249..5677cd4f15 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index 0a0757179b..e784cb6db1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index 0291c0931b..bd4952efa5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; DO NOT EDIT. +//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index b0207d1c9b..817edbf95c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index f0dec6f0b4..ea453614e6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 33d1dc5404..467971eed6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index fe2b689b63..32eec5ed56 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 5c08d573b3..a37f773756 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go new file mode 100644 index 0000000000..073daad43b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -0,0 +1,2670 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +// TODO: auto-generate. + +const ( + SYS_ACOSD128 = 0xB80 + SYS_ACOSD32 = 0xB7E + SYS_ACOSD64 = 0xB7F + SYS_ACOSHD128 = 0xB83 + SYS_ACOSHD32 = 0xB81 + SYS_ACOSHD64 = 0xB82 + SYS_AIO_FSYNC = 0xC69 + SYS_ASCTIME = 0x0AE + SYS_ASCTIME64 = 0xCD7 + SYS_ASCTIME64_R = 0xCD8 + SYS_ASIND128 = 0xB86 + SYS_ASIND32 = 0xB84 + SYS_ASIND64 = 0xB85 + SYS_ASINHD128 = 0xB89 + SYS_ASINHD32 = 0xB87 + SYS_ASINHD64 = 0xB88 + SYS_ATAN2D128 = 0xB8F + SYS_ATAN2D32 = 0xB8D + SYS_ATAN2D64 = 0xB8E + SYS_ATAND128 = 0xB8C + SYS_ATAND32 = 0xB8A + SYS_ATAND64 = 0xB8B + SYS_ATANHD128 = 0xB92 + SYS_ATANHD32 = 0xB90 + SYS_ATANHD64 = 0xB91 + SYS_BIND2ADDRSEL = 0xD59 + SYS_C16RTOMB = 0xD40 + SYS_C32RTOMB = 0xD41 + SYS_CBRTD128 = 0xB95 + SYS_CBRTD32 = 0xB93 + SYS_CBRTD64 = 0xB94 + SYS_CEILD128 = 0xB98 + SYS_CEILD32 = 0xB96 + SYS_CEILD64 = 0xB97 + SYS_CLEARENV = 0x0C9 + SYS_CLEARERR_UNLOCKED = 0xCA1 + SYS_CLOCK = 0x0AA + SYS_CLOGL = 0xA00 + SYS_CLRMEMF = 0x0BD + SYS_CONJ = 0xA03 + SYS_CONJF = 0xA06 + SYS_CONJL = 0xA09 + SYS_COPYSIGND128 = 0xB9E + SYS_COPYSIGND32 = 0xB9C + SYS_COPYSIGND64 = 0xB9D + SYS_COSD128 = 0xBA1 + SYS_COSD32 = 0xB9F + SYS_COSD64 = 0xBA0 + SYS_COSHD128 = 0xBA4 + SYS_COSHD32 = 0xBA2 + SYS_COSHD64 = 0xBA3 + SYS_CPOW = 0xA0C + SYS_CPOWF = 0xA0F + SYS_CPOWL = 0xA12 + SYS_CPROJ = 0xA15 + SYS_CPROJF = 0xA18 + SYS_CPROJL = 0xA1B + SYS_CREAL = 0xA1E + SYS_CREALF = 0xA21 + SYS_CREALL = 0xA24 + SYS_CSIN = 0xA27 + SYS_CSINF = 0xA2A + SYS_CSINH = 0xA30 + SYS_CSINHF = 0xA33 + SYS_CSINHL = 0xA36 + SYS_CSINL = 0xA2D + SYS_CSNAP = 0x0C5 + SYS_CSQRT = 0xA39 + SYS_CSQRTF = 0xA3C + SYS_CSQRTL = 0xA3F + SYS_CTAN = 0xA42 + SYS_CTANF = 0xA45 + SYS_CTANH = 0xA4B + SYS_CTANHF = 0xA4E + SYS_CTANHL = 0xA51 + SYS_CTANL = 0xA48 + SYS_CTIME = 0x0AB + SYS_CTIME64 = 0xCD9 + SYS_CTIME64_R = 0xCDA + SYS_CTRACE = 0x0C6 + SYS_DIFFTIME = 0x0A7 + SYS_DIFFTIME64 = 0xCDB + SYS_DLADDR = 0xC82 + SYS_DYNALLOC = 0x0C3 + SYS_DYNFREE = 0x0C2 + SYS_ERFCD128 = 0xBAA + SYS_ERFCD32 = 0xBA8 + SYS_ERFCD64 = 0xBA9 + SYS_ERFD128 = 0xBA7 + SYS_ERFD32 = 0xBA5 + SYS_ERFD64 = 0xBA6 + SYS_EXP2D128 = 0xBB0 + SYS_EXP2D32 = 0xBAE + SYS_EXP2D64 = 0xBAF + SYS_EXPD128 = 0xBAD + SYS_EXPD32 = 0xBAB + SYS_EXPD64 = 0xBAC + SYS_EXPM1D128 = 0xBB3 + SYS_EXPM1D32 = 0xBB1 + SYS_EXPM1D64 = 0xBB2 + SYS_FABSD128 = 0xBB6 + SYS_FABSD32 = 0xBB4 + SYS_FABSD64 = 0xBB5 + SYS_FDELREC_UNLOCKED = 0xCA2 + SYS_FDIMD128 = 0xBB9 + SYS_FDIMD32 = 0xBB7 + SYS_FDIMD64 = 0xBB8 + SYS_FDOPEN_UNLOCKED = 0xCFC + SYS_FECLEAREXCEPT = 
0xAEA + SYS_FEGETENV = 0xAEB + SYS_FEGETEXCEPTFLAG = 0xAEC + SYS_FEGETROUND = 0xAED + SYS_FEHOLDEXCEPT = 0xAEE + SYS_FEOF_UNLOCKED = 0xCA3 + SYS_FERAISEEXCEPT = 0xAEF + SYS_FERROR_UNLOCKED = 0xCA4 + SYS_FESETENV = 0xAF0 + SYS_FESETEXCEPTFLAG = 0xAF1 + SYS_FESETROUND = 0xAF2 + SYS_FETCHEP = 0x0BF + SYS_FETESTEXCEPT = 0xAF3 + SYS_FEUPDATEENV = 0xAF4 + SYS_FE_DEC_GETROUND = 0xBBA + SYS_FE_DEC_SETROUND = 0xBBB + SYS_FFLUSH_UNLOCKED = 0xCA5 + SYS_FGETC_UNLOCKED = 0xC80 + SYS_FGETPOS64 = 0xCEE + SYS_FGETPOS64_UNLOCKED = 0xCF4 + SYS_FGETPOS_UNLOCKED = 0xCA6 + SYS_FGETS_UNLOCKED = 0xC7C + SYS_FGETWC_UNLOCKED = 0xCA7 + SYS_FGETWS_UNLOCKED = 0xCA8 + SYS_FILENO_UNLOCKED = 0xCA9 + SYS_FLDATA = 0x0C1 + SYS_FLDATA_UNLOCKED = 0xCAA + SYS_FLOCATE_UNLOCKED = 0xCAB + SYS_FLOORD128 = 0xBBE + SYS_FLOORD32 = 0xBBC + SYS_FLOORD64 = 0xBBD + SYS_FMA = 0xA63 + SYS_FMAD128 = 0xBC1 + SYS_FMAD32 = 0xBBF + SYS_FMAD64 = 0xBC0 + SYS_FMAF = 0xA66 + SYS_FMAL = 0xA69 + SYS_FMAX = 0xA6C + SYS_FMAXD128 = 0xBC4 + SYS_FMAXD32 = 0xBC2 + SYS_FMAXD64 = 0xBC3 + SYS_FMAXF = 0xA6F + SYS_FMAXL = 0xA72 + SYS_FMIN = 0xA75 + SYS_FMIND128 = 0xBC7 + SYS_FMIND32 = 0xBC5 + SYS_FMIND64 = 0xBC6 + SYS_FMINF = 0xA78 + SYS_FMINL = 0xA7B + SYS_FMODD128 = 0xBCA + SYS_FMODD32 = 0xBC8 + SYS_FMODD64 = 0xBC9 + SYS_FOPEN64 = 0xD49 + SYS_FOPEN64_UNLOCKED = 0xD4A + SYS_FOPEN_UNLOCKED = 0xCFA + SYS_FPRINTF_UNLOCKED = 0xCAC + SYS_FPUTC_UNLOCKED = 0xC81 + SYS_FPUTS_UNLOCKED = 0xC7E + SYS_FPUTWC_UNLOCKED = 0xCAD + SYS_FPUTWS_UNLOCKED = 0xCAE + SYS_FREAD_NOUPDATE = 0xCEC + SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED + SYS_FREAD_UNLOCKED = 0xC7B + SYS_FREEIFADDRS = 0xCE6 + SYS_FREOPEN64 = 0xD4B + SYS_FREOPEN64_UNLOCKED = 0xD4C + SYS_FREOPEN_UNLOCKED = 0xCFB + SYS_FREXPD128 = 0xBCE + SYS_FREXPD32 = 0xBCC + SYS_FREXPD64 = 0xBCD + SYS_FSCANF_UNLOCKED = 0xCAF + SYS_FSEEK64 = 0xCEF + SYS_FSEEK64_UNLOCKED = 0xCF5 + SYS_FSEEKO64 = 0xCF0 + SYS_FSEEKO64_UNLOCKED = 0xCF6 + SYS_FSEEKO_UNLOCKED = 0xCB1 + SYS_FSEEK_UNLOCKED = 0xCB0 + SYS_FSETPOS64 = 0xCF1 + SYS_FSETPOS64_UNLOCKED = 0xCF7 + SYS_FSETPOS_UNLOCKED = 0xCB3 + SYS_FTELL64 = 0xCF2 + SYS_FTELL64_UNLOCKED = 0xCF8 + SYS_FTELLO64 = 0xCF3 + SYS_FTELLO64_UNLOCKED = 0xCF9 + SYS_FTELLO_UNLOCKED = 0xCB5 + SYS_FTELL_UNLOCKED = 0xCB4 + SYS_FUPDATE = 0x0B5 + SYS_FUPDATE_UNLOCKED = 0xCB7 + SYS_FWIDE_UNLOCKED = 0xCB8 + SYS_FWPRINTF_UNLOCKED = 0xCB9 + SYS_FWRITE_UNLOCKED = 0xC7A + SYS_FWSCANF_UNLOCKED = 0xCBA + SYS_GETDATE64 = 0xD4F + SYS_GETIFADDRS = 0xCE7 + SYS_GETIPV4SOURCEFILTER = 0xC77 + SYS_GETSOURCEFILTER = 0xC79 + SYS_GETSYNTX = 0x0FD + SYS_GETS_UNLOCKED = 0xC7D + SYS_GETTIMEOFDAY64 = 0xD50 + SYS_GETWCHAR_UNLOCKED = 0xCBC + SYS_GETWC_UNLOCKED = 0xCBB + SYS_GMTIME = 0x0B0 + SYS_GMTIME64 = 0xCDC + SYS_GMTIME64_R = 0xCDD + SYS_HYPOTD128 = 0xBD1 + SYS_HYPOTD32 = 0xBCF + SYS_HYPOTD64 = 0xBD0 + SYS_ILOGBD128 = 0xBD4 + SYS_ILOGBD32 = 0xBD2 + SYS_ILOGBD64 = 0xBD3 + SYS_ILOGBF = 0xA7E + SYS_ILOGBL = 0xA81 + SYS_INET6_IS_SRCADDR = 0xD5A + SYS_ISBLANK = 0x0FE + SYS_ISWALNUM = 0x0FF + SYS_LDEXPD128 = 0xBD7 + SYS_LDEXPD32 = 0xBD5 + SYS_LDEXPD64 = 0xBD6 + SYS_LGAMMAD128 = 0xBDA + SYS_LGAMMAD32 = 0xBD8 + SYS_LGAMMAD64 = 0xBD9 + SYS_LIO_LISTIO = 0xC6A + SYS_LLRINT = 0xA84 + SYS_LLRINTD128 = 0xBDD + SYS_LLRINTD32 = 0xBDB + SYS_LLRINTD64 = 0xBDC + SYS_LLRINTF = 0xA87 + SYS_LLRINTL = 0xA8A + SYS_LLROUND = 0xA8D + SYS_LLROUNDD128 = 0xBE0 + SYS_LLROUNDD32 = 0xBDE + SYS_LLROUNDD64 = 0xBDF + SYS_LLROUNDF = 0xA90 + SYS_LLROUNDL = 0xA93 + SYS_LOCALTIM = 0x0B1 + SYS_LOCALTIME = 0x0B1 + SYS_LOCALTIME64 = 0xCDE + SYS_LOCALTIME64_R = 0xCDF + SYS_LOG10D128 = 
0xBE6 + SYS_LOG10D32 = 0xBE4 + SYS_LOG10D64 = 0xBE5 + SYS_LOG1PD128 = 0xBE9 + SYS_LOG1PD32 = 0xBE7 + SYS_LOG1PD64 = 0xBE8 + SYS_LOG2D128 = 0xBEC + SYS_LOG2D32 = 0xBEA + SYS_LOG2D64 = 0xBEB + SYS_LOGBD128 = 0xBEF + SYS_LOGBD32 = 0xBED + SYS_LOGBD64 = 0xBEE + SYS_LOGBF = 0xA96 + SYS_LOGBL = 0xA99 + SYS_LOGD128 = 0xBE3 + SYS_LOGD32 = 0xBE1 + SYS_LOGD64 = 0xBE2 + SYS_LRINT = 0xA9C + SYS_LRINTD128 = 0xBF2 + SYS_LRINTD32 = 0xBF0 + SYS_LRINTD64 = 0xBF1 + SYS_LRINTF = 0xA9F + SYS_LRINTL = 0xAA2 + SYS_LROUNDD128 = 0xBF5 + SYS_LROUNDD32 = 0xBF3 + SYS_LROUNDD64 = 0xBF4 + SYS_LROUNDL = 0xAA5 + SYS_MBLEN = 0x0AF + SYS_MBRTOC16 = 0xD42 + SYS_MBRTOC32 = 0xD43 + SYS_MEMSET = 0x0A3 + SYS_MKTIME = 0x0AC + SYS_MKTIME64 = 0xCE0 + SYS_MODFD128 = 0xBF8 + SYS_MODFD32 = 0xBF6 + SYS_MODFD64 = 0xBF7 + SYS_NAN = 0xAA8 + SYS_NAND128 = 0xBFB + SYS_NAND32 = 0xBF9 + SYS_NAND64 = 0xBFA + SYS_NANF = 0xAAA + SYS_NANL = 0xAAC + SYS_NEARBYINT = 0xAAE + SYS_NEARBYINTD128 = 0xBFE + SYS_NEARBYINTD32 = 0xBFC + SYS_NEARBYINTD64 = 0xBFD + SYS_NEARBYINTF = 0xAB1 + SYS_NEARBYINTL = 0xAB4 + SYS_NEXTAFTERD128 = 0xC01 + SYS_NEXTAFTERD32 = 0xBFF + SYS_NEXTAFTERD64 = 0xC00 + SYS_NEXTAFTERF = 0xAB7 + SYS_NEXTAFTERL = 0xABA + SYS_NEXTTOWARD = 0xABD + SYS_NEXTTOWARDD128 = 0xC04 + SYS_NEXTTOWARDD32 = 0xC02 + SYS_NEXTTOWARDD64 = 0xC03 + SYS_NEXTTOWARDF = 0xAC0 + SYS_NEXTTOWARDL = 0xAC3 + SYS_NL_LANGINFO = 0x0FC + SYS_PERROR_UNLOCKED = 0xCBD + SYS_POSIX_FALLOCATE = 0xCE8 + SYS_POSIX_MEMALIGN = 0xCE9 + SYS_POSIX_OPENPT = 0xC66 + SYS_POWD128 = 0xC07 + SYS_POWD32 = 0xC05 + SYS_POWD64 = 0xC06 + SYS_PRINTF_UNLOCKED = 0xCBE + SYS_PSELECT = 0xC67 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 + SYS_PUTS_UNLOCKED = 0xC7F + SYS_PUTWCHAR_UNLOCKED = 0xCC0 + SYS_PUTWC_UNLOCKED = 0xCBF + SYS_QUANTEXPD128 = 0xD46 + SYS_QUANTEXPD32 = 0xD44 + SYS_QUANTEXPD64 = 0xD45 + SYS_QUANTIZED128 = 0xC0A + SYS_QUANTIZED32 = 0xC08 + SYS_QUANTIZED64 = 0xC09 + SYS_REMAINDERD128 = 0xC0D + SYS_REMAINDERD32 = 0xC0B + SYS_REMAINDERD64 = 0xC0C + SYS_RESIZE_ALLOC = 0xCEB + SYS_REWIND_UNLOCKED = 0xCC1 + SYS_RINTD128 = 0xC13 + SYS_RINTD32 = 0xC11 + SYS_RINTD64 = 0xC12 + SYS_RINTF = 0xACB + SYS_RINTL = 0xACD + SYS_ROUND = 0xACF + SYS_ROUNDD128 = 0xC16 + SYS_ROUNDD32 = 0xC14 + SYS_ROUNDD64 = 0xC15 + SYS_ROUNDF = 0xAD2 + SYS_ROUNDL = 0xAD5 + SYS_SAMEQUANTUMD128 = 0xC19 + SYS_SAMEQUANTUMD32 = 0xC17 + SYS_SAMEQUANTUMD64 = 0xC18 + SYS_SCALBLN = 0xAD8 + SYS_SCALBLND128 = 0xC1C + SYS_SCALBLND32 = 0xC1A + SYS_SCALBLND64 = 0xC1B + SYS_SCALBLNF = 0xADB + SYS_SCALBLNL = 0xADE + SYS_SCALBND128 = 0xC1F + SYS_SCALBND32 = 0xC1D + SYS_SCALBND64 = 0xC1E + SYS_SCALBNF = 0xAE3 + SYS_SCALBNL = 0xAE6 + SYS_SCANF_UNLOCKED = 0xCC2 + SYS_SCHED_YIELD = 0xB32 + SYS_SETENV = 0x0C8 + SYS_SETIPV4SOURCEFILTER = 0xC76 + SYS_SETSOURCEFILTER = 0xC78 + SYS_SHM_OPEN = 0xC8C + SYS_SHM_UNLINK = 0xC8D + SYS_SIND128 = 0xC22 + SYS_SIND32 = 0xC20 + SYS_SIND64 = 0xC21 + SYS_SINHD128 = 0xC25 + SYS_SINHD32 = 0xC23 + SYS_SINHD64 = 0xC24 + SYS_SIZEOF_ALLOC = 0xCEA + SYS_SOCKATMARK = 0xC68 + SYS_SQRTD128 = 0xC28 + SYS_SQRTD32 = 0xC26 + SYS_SQRTD64 = 0xC27 + SYS_STRCHR = 0x0A0 + SYS_STRCSPN = 0x0A1 + SYS_STRERROR = 0x0A8 + SYS_STRERROR_R = 0xB33 + SYS_STRFTIME = 0x0B2 + SYS_STRLEN = 0x0A9 + SYS_STRPBRK = 0x0A2 + SYS_STRSPN = 0x0A4 + SYS_STRSTR = 0x0A5 + SYS_STRTOD128 = 0xC2B + SYS_STRTOD32 = 0xC29 + SYS_STRTOD64 = 0xC2A + SYS_STRTOK = 0x0A6 + SYS_TAND128 = 0xC2E + SYS_TAND32 = 0xC2C + SYS_TAND64 = 0xC2D + SYS_TANHD128 = 0xC31 + SYS_TANHD32 = 0xC2F + SYS_TANHD64 = 
0xC30 + SYS_TGAMMAD128 = 0xC34 + SYS_TGAMMAD32 = 0xC32 + SYS_TGAMMAD64 = 0xC33 + SYS_TIME = 0x0AD + SYS_TIME64 = 0xCE1 + SYS_TMPFILE64 = 0xD4D + SYS_TMPFILE64_UNLOCKED = 0xD4E + SYS_TMPFILE_UNLOCKED = 0xCFD + SYS_TRUNCD128 = 0xC40 + SYS_TRUNCD32 = 0xC3E + SYS_TRUNCD64 = 0xC3F + SYS_UNGETC_UNLOCKED = 0xCC3 + SYS_UNGETWC_UNLOCKED = 0xCC4 + SYS_UNSETENV = 0xB34 + SYS_VFPRINTF_UNLOCKED = 0xCC5 + SYS_VFSCANF_UNLOCKED = 0xCC7 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 + SYS_VFWSCANF_UNLOCKED = 0xCCB + SYS_VPRINTF_UNLOCKED = 0xCCD + SYS_VSCANF_UNLOCKED = 0xCCF + SYS_VWPRINTF_UNLOCKED = 0xCD1 + SYS_VWSCANF_UNLOCKED = 0xCD3 + SYS_WCSTOD128 = 0xC43 + SYS_WCSTOD32 = 0xC41 + SYS_WCSTOD64 = 0xC42 + SYS_WPRINTF_UNLOCKED = 0xCD5 + SYS_WSCANF_UNLOCKED = 0xCD6 + SYS__FLUSHLBF = 0xD68 + SYS__FLUSHLBF_UNLOCKED = 0xD6F + SYS___ACOSHF_H = 0xA54 + SYS___ACOSHL_H = 0xA55 + SYS___ASINHF_H = 0xA56 + SYS___ASINHL_H = 0xA57 + SYS___ATANPID128 = 0xC6D + SYS___ATANPID32 = 0xC6B + SYS___ATANPID64 = 0xC6C + SYS___CBRTF_H = 0xA58 + SYS___CBRTL_H = 0xA59 + SYS___CDUMP = 0x0C4 + SYS___CLASS = 0xAFA + SYS___CLASS2 = 0xB99 + SYS___CLASS2D128 = 0xC99 + SYS___CLASS2D32 = 0xC97 + SYS___CLASS2D64 = 0xC98 + SYS___CLASS2F = 0xC91 + SYS___CLASS2F_B = 0xC93 + SYS___CLASS2F_H = 0xC94 + SYS___CLASS2L = 0xC92 + SYS___CLASS2L_B = 0xC95 + SYS___CLASS2L_H = 0xC96 + SYS___CLASS2_B = 0xB9A + SYS___CLASS2_H = 0xB9B + SYS___CLASS_B = 0xAFB + SYS___CLASS_H = 0xAFC + SYS___CLOGL_B = 0xA01 + SYS___CLOGL_H = 0xA02 + SYS___CLRENV = 0x0C9 + SYS___CLRMF = 0x0BD + SYS___CODEPAGE_INFO = 0xC64 + SYS___CONJF_B = 0xA07 + SYS___CONJF_H = 0xA08 + SYS___CONJL_B = 0xA0A + SYS___CONJL_H = 0xA0B + SYS___CONJ_B = 0xA04 + SYS___CONJ_H = 0xA05 + SYS___COPYSIGN_B = 0xA5A + SYS___COPYSIGN_H = 0xAF5 + SYS___COSPID128 = 0xC70 + SYS___COSPID32 = 0xC6E + SYS___COSPID64 = 0xC6F + SYS___CPOWF_B = 0xA10 + SYS___CPOWF_H = 0xA11 + SYS___CPOWL_B = 0xA13 + SYS___CPOWL_H = 0xA14 + SYS___CPOW_B = 0xA0D + SYS___CPOW_H = 0xA0E + SYS___CPROJF_B = 0xA19 + SYS___CPROJF_H = 0xA1A + SYS___CPROJL_B = 0xA1C + SYS___CPROJL_H = 0xA1D + SYS___CPROJ_B = 0xA16 + SYS___CPROJ_H = 0xA17 + SYS___CREALF_B = 0xA22 + SYS___CREALF_H = 0xA23 + SYS___CREALL_B = 0xA25 + SYS___CREALL_H = 0xA26 + SYS___CREAL_B = 0xA1F + SYS___CREAL_H = 0xA20 + SYS___CSINF_B = 0xA2B + SYS___CSINF_H = 0xA2C + SYS___CSINHF_B = 0xA34 + SYS___CSINHF_H = 0xA35 + SYS___CSINHL_B = 0xA37 + SYS___CSINHL_H = 0xA38 + SYS___CSINH_B = 0xA31 + SYS___CSINH_H = 0xA32 + SYS___CSINL_B = 0xA2E + SYS___CSINL_H = 0xA2F + SYS___CSIN_B = 0xA28 + SYS___CSIN_H = 0xA29 + SYS___CSNAP = 0x0C5 + SYS___CSQRTF_B = 0xA3D + SYS___CSQRTF_H = 0xA3E + SYS___CSQRTL_B = 0xA40 + SYS___CSQRTL_H = 0xA41 + SYS___CSQRT_B = 0xA3A + SYS___CSQRT_H = 0xA3B + SYS___CTANF_B = 0xA46 + SYS___CTANF_H = 0xA47 + SYS___CTANHF_B = 0xA4F + SYS___CTANHF_H = 0xA50 + SYS___CTANHL_B = 0xA52 + SYS___CTANHL_H = 0xA53 + SYS___CTANH_B = 0xA4C + SYS___CTANH_H = 0xA4D + SYS___CTANL_B = 0xA49 + SYS___CTANL_H = 0xA4A + SYS___CTAN_B = 0xA43 + SYS___CTAN_H = 0xA44 + SYS___CTEST = 0x0C7 + SYS___CTRACE = 0x0C6 + SYS___D1TOP = 0xC9B + SYS___D2TOP = 0xC9C + SYS___D4TOP = 0xC9D + SYS___DYNALL = 0x0C3 + SYS___DYNFRE = 0x0C2 + SYS___EXP2F_H = 0xA5E + SYS___EXP2L_H = 0xA5F + SYS___EXP2_H = 0xA5D + SYS___EXPM1F_H = 0xA5B + SYS___EXPM1L_H = 0xA5C + SYS___FBUFSIZE = 0xD60 + SYS___FLBF = 0xD62 + SYS___FLDATA = 0x0C1 + SYS___FMAF_B = 0xA67 + SYS___FMAF_H = 0xA68 + SYS___FMAL_B = 0xA6A + SYS___FMAL_H = 0xA6B + SYS___FMAXF_B = 0xA70 + SYS___FMAXF_H = 0xA71 + SYS___FMAXL_B = 0xA73 + SYS___FMAXL_H = 0xA74 + 
SYS___FMAX_B = 0xA6D + SYS___FMAX_H = 0xA6E + SYS___FMA_B = 0xA64 + SYS___FMA_H = 0xA65 + SYS___FMINF_B = 0xA79 + SYS___FMINF_H = 0xA7A + SYS___FMINL_B = 0xA7C + SYS___FMINL_H = 0xA7D + SYS___FMIN_B = 0xA76 + SYS___FMIN_H = 0xA77 + SYS___FPENDING = 0xD61 + SYS___FPENDING_UNLOCKED = 0xD6C + SYS___FPURGE = 0xD69 + SYS___FPURGE_UNLOCKED = 0xD70 + SYS___FP_CAST_D = 0xBCB + SYS___FREADABLE = 0xD63 + SYS___FREADAHEAD = 0xD6A + SYS___FREADAHEAD_UNLOCKED = 0xD71 + SYS___FREADING = 0xD65 + SYS___FREADING_UNLOCKED = 0xD6D + SYS___FSEEK2 = 0xB3C + SYS___FSETERR = 0xD6B + SYS___FSETLOCKING = 0xD67 + SYS___FTCHEP = 0x0BF + SYS___FTELL2 = 0xB3B + SYS___FUPDT = 0x0B5 + SYS___FWRITABLE = 0xD64 + SYS___FWRITING = 0xD66 + SYS___FWRITING_UNLOCKED = 0xD6E + SYS___GETCB = 0x0B4 + SYS___GETGRGID1 = 0xD5B + SYS___GETGRNAM1 = 0xD5C + SYS___GETTHENT = 0xCE5 + SYS___GETTOD = 0xD3E + SYS___HYPOTF_H = 0xAF6 + SYS___HYPOTL_H = 0xAF7 + SYS___ILOGBF_B = 0xA7F + SYS___ILOGBF_H = 0xA80 + SYS___ILOGBL_B = 0xA82 + SYS___ILOGBL_H = 0xA83 + SYS___ISBLANK_A = 0xB2E + SYS___ISBLNK = 0x0FE + SYS___ISWBLANK_A = 0xB2F + SYS___LE_CEEGTJS = 0xD72 + SYS___LE_TRACEBACK = 0xB7A + SYS___LGAMMAL_H = 0xA62 + SYS___LGAMMA_B_C99 = 0xB39 + SYS___LGAMMA_H_C99 = 0xB38 + SYS___LGAMMA_R_C99 = 0xB3A + SYS___LLRINTF_B = 0xA88 + SYS___LLRINTF_H = 0xA89 + SYS___LLRINTL_B = 0xA8B + SYS___LLRINTL_H = 0xA8C + SYS___LLRINT_B = 0xA85 + SYS___LLRINT_H = 0xA86 + SYS___LLROUNDF_B = 0xA91 + SYS___LLROUNDF_H = 0xA92 + SYS___LLROUNDL_B = 0xA94 + SYS___LLROUNDL_H = 0xA95 + SYS___LLROUND_B = 0xA8E + SYS___LLROUND_H = 0xA8F + SYS___LOCALE_CTL = 0xD47 + SYS___LOG1PF_H = 0xA60 + SYS___LOG1PL_H = 0xA61 + SYS___LOGBF_B = 0xA97 + SYS___LOGBF_H = 0xA98 + SYS___LOGBL_B = 0xA9A + SYS___LOGBL_H = 0xA9B + SYS___LOGIN_APPLID = 0xCE2 + SYS___LRINTF_B = 0xAA0 + SYS___LRINTF_H = 0xAA1 + SYS___LRINTL_B = 0xAA3 + SYS___LRINTL_H = 0xAA4 + SYS___LRINT_B = 0xA9D + SYS___LRINT_H = 0xA9E + SYS___LROUNDF_FIXUP = 0xB31 + SYS___LROUNDL_B = 0xAA6 + SYS___LROUNDL_H = 0xAA7 + SYS___LROUND_FIXUP = 0xB30 + SYS___MOSERVICES = 0xD3D + SYS___MUST_STAY_CLEAN = 0xB7C + SYS___NANF_B = 0xAAB + SYS___NANL_B = 0xAAD + SYS___NAN_B = 0xAA9 + SYS___NEARBYINTF_B = 0xAB2 + SYS___NEARBYINTF_H = 0xAB3 + SYS___NEARBYINTL_B = 0xAB5 + SYS___NEARBYINTL_H = 0xAB6 + SYS___NEARBYINT_B = 0xAAF + SYS___NEARBYINT_H = 0xAB0 + SYS___NEXTAFTERF_B = 0xAB8 + SYS___NEXTAFTERF_H = 0xAB9 + SYS___NEXTAFTERL_B = 0xABB + SYS___NEXTAFTERL_H = 0xABC + SYS___NEXTTOWARDF_B = 0xAC1 + SYS___NEXTTOWARDF_H = 0xAC2 + SYS___NEXTTOWARDL_B = 0xAC4 + SYS___NEXTTOWARDL_H = 0xAC5 + SYS___NEXTTOWARD_B = 0xABE + SYS___NEXTTOWARD_H = 0xABF + SYS___O_ENV = 0xB7D + SYS___PASSWD_APPLID = 0xCE3 + SYS___PTOD1 = 0xC9E + SYS___PTOD2 = 0xC9F + SYS___PTOD4 = 0xCA0 + SYS___REGCOMP_STD = 0x0EA + SYS___REMAINDERF_H = 0xAC6 + SYS___REMAINDERL_H = 0xAC7 + SYS___REMQUOD128 = 0xC10 + SYS___REMQUOD32 = 0xC0E + SYS___REMQUOD64 = 0xC0F + SYS___REMQUOF_H = 0xAC9 + SYS___REMQUOL_H = 0xACA + SYS___REMQUO_H = 0xAC8 + SYS___RINTF_B = 0xACC + SYS___RINTL_B = 0xACE + SYS___ROUNDF_B = 0xAD3 + SYS___ROUNDF_H = 0xAD4 + SYS___ROUNDL_B = 0xAD6 + SYS___ROUNDL_H = 0xAD7 + SYS___ROUND_B = 0xAD0 + SYS___ROUND_H = 0xAD1 + SYS___SCALBLNF_B = 0xADC + SYS___SCALBLNF_H = 0xADD + SYS___SCALBLNL_B = 0xADF + SYS___SCALBLNL_H = 0xAE0 + SYS___SCALBLN_B = 0xAD9 + SYS___SCALBLN_H = 0xADA + SYS___SCALBNF_B = 0xAE4 + SYS___SCALBNF_H = 0xAE5 + SYS___SCALBNL_B = 0xAE7 + SYS___SCALBNL_H = 0xAE8 + SYS___SCALBN_B = 0xAE1 + SYS___SCALBN_H = 0xAE2 + SYS___SETENV = 0x0C8 + SYS___SINPID128 = 0xC73 + 
SYS___SINPID32 = 0xC71 + SYS___SINPID64 = 0xC72 + SYS___SMF_RECORD2 = 0xD48 + SYS___STATIC_REINIT = 0xB3D + SYS___TGAMMAF_H_C99 = 0xB79 + SYS___TGAMMAL_H = 0xAE9 + SYS___TGAMMA_H_C99 = 0xB78 + SYS___TOCSNAME2 = 0xC9A + SYS_CEIL = 0x01F + SYS_CHAUDIT = 0x1E0 + SYS_EXP = 0x01A + SYS_FCHAUDIT = 0x1E1 + SYS_FREXP = 0x01D + SYS_GETGROUPSBYNAME = 0x1E2 + SYS_GETPWUID = 0x1A0 + SYS_GETUID = 0x1A1 + SYS_ISATTY = 0x1A3 + SYS_KILL = 0x1A4 + SYS_LDEXP = 0x01E + SYS_LINK = 0x1A5 + SYS_LOG10 = 0x01C + SYS_LSEEK = 0x1A6 + SYS_LSTAT = 0x1A7 + SYS_MKDIR = 0x1A8 + SYS_MKFIFO = 0x1A9 + SYS_MKNOD = 0x1AA + SYS_MODF = 0x01B + SYS_MOUNT = 0x1AB + SYS_OPEN = 0x1AC + SYS_OPENDIR = 0x1AD + SYS_PATHCONF = 0x1AE + SYS_PAUSE = 0x1AF + SYS_PIPE = 0x1B0 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED + SYS_PTHREAD_ATTR_INIT = 0x1E6 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC + SYS_PTHREAD_CANCEL = 0x1EE + SYS_PTHREAD_CLEANUP_POP = 0x1F0 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 + SYS_PTHREAD_COND_DESTROY = 0x1F4 + SYS_PTHREAD_COND_INIT = 0x1F3 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 + SYS_PTHREAD_COND_WAIT = 0x1F7 + SYS_PTHREAD_CREATE = 0x1F9 + SYS_PTHREAD_DETACH = 0x1FA + SYS_PTHREAD_EQUAL = 0x1FB + SYS_PTHREAD_EXIT = 0x1E4 + SYS_PTHREAD_GETSPECIFIC = 0x1FC + SYS_PTHREAD_JOIN = 0x1FD + SYS_PTHREAD_KEY_CREATE = 0x1FE + SYS_PTHREAD_KILL = 0x1E5 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF + SYS_READ = 0x1B2 + SYS_READDIR = 0x1B3 + SYS_READLINK = 0x1B4 + SYS_REWINDDIR = 0x1B5 + SYS_RMDIR = 0x1B6 + SYS_SETEGID = 0x1B7 + SYS_SETEUID = 0x1B8 + SYS_SETGID = 0x1B9 + SYS_SETPGID = 0x1BA + SYS_SETSID = 0x1BB + SYS_SETUID = 0x1BC + SYS_SIGACTION = 0x1BD + SYS_SIGADDSET = 0x1BE + SYS_SIGDELSET = 0x1BF + SYS_SIGEMPTYSET = 0x1C0 + SYS_SIGFILLSET = 0x1C1 + SYS_SIGISMEMBER = 0x1C2 + SYS_SIGLONGJMP = 0x1C3 + SYS_SIGPENDING = 0x1C4 + SYS_SIGPROCMASK = 0x1C5 + SYS_SIGSETJMP = 0x1C6 + SYS_SIGSUSPEND = 0x1C7 + SYS_SIGWAIT = 0x1E3 + SYS_SLEEP = 0x1C8 + SYS_STAT = 0x1C9 + SYS_SYMLINK = 0x1CB + SYS_SYSCONF = 0x1CC + SYS_TCDRAIN = 0x1CD + SYS_TCFLOW = 0x1CE + SYS_TCFLUSH = 0x1CF + SYS_TCGETATTR = 0x1D0 + SYS_TCGETPGRP = 0x1D1 + SYS_TCSENDBREAK = 0x1D2 + SYS_TCSETATTR = 0x1D3 + SYS_TCSETPGRP = 0x1D4 + SYS_TIMES = 0x1D5 + SYS_TTYNAME = 0x1D6 + SYS_TZSET = 0x1D7 + SYS_UMASK = 0x1D8 + SYS_UMOUNT = 0x1D9 + SYS_UNAME = 0x1DA + SYS_UNLINK = 0x1DB + SYS_UTIME = 0x1DC + SYS_WAIT = 0x1DD + SYS_WAITPID = 0x1DE + SYS_WRITE = 0x1DF + SYS_W_GETPSENT = 0x1B1 + SYS_W_IOCTL = 0x1A2 + SYS_W_STATFS = 0x1CA + SYS_A64L = 0x2EF + SYS_BCMP = 0x2B9 + SYS_BCOPY = 0x2BA + SYS_BZERO = 0x2BB + SYS_CATCLOSE = 0x2B6 + SYS_CATGETS = 0x2B7 + SYS_CATOPEN = 0x2B8 + SYS_CRYPT = 0x2AC + SYS_DBM_CLEARERR = 0x2F7 + SYS_DBM_CLOSE = 0x2F8 + SYS_DBM_DELETE = 0x2F9 + SYS_DBM_ERROR = 0x2FA + SYS_DBM_FETCH = 0x2FB + SYS_DBM_FIRSTKEY = 0x2FC + SYS_DBM_NEXTKEY = 0x2FD + SYS_DBM_OPEN = 0x2FE + SYS_DBM_STORE = 0x2FF + SYS_DRAND48 = 0x2B2 + SYS_ENCRYPT = 0x2AD + SYS_ENDUTXENT = 0x2E1 + SYS_ERAND48 = 0x2B3 + SYS_ERF = 0x02C + SYS_ERFC = 0x02D + SYS_FCHDIR = 0x2D9 + SYS_FFS = 0x2BC + SYS_FMTMSG = 0x2E5 + SYS_FSTATVFS = 0x2B4 + SYS_FTIME = 0x2F5 + SYS_GAMMA = 0x02E + SYS_GETDATE = 0x2A6 + SYS_GETPAGESIZE = 0x2D8 + SYS_GETTIMEOFDAY = 0x2F6 + SYS_GETUTXENT = 0x2E0 + SYS_GETUTXID = 
0x2E2 + SYS_GETUTXLINE = 0x2E3 + SYS_HCREATE = 0x2C6 + SYS_HDESTROY = 0x2C7 + SYS_HSEARCH = 0x2C8 + SYS_HYPOT = 0x02B + SYS_INDEX = 0x2BD + SYS_INITSTATE = 0x2C2 + SYS_INSQUE = 0x2CF + SYS_ISASCII = 0x2ED + SYS_JRAND48 = 0x2E6 + SYS_L64A = 0x2F0 + SYS_LCONG48 = 0x2EA + SYS_LFIND = 0x2C9 + SYS_LRAND48 = 0x2E7 + SYS_LSEARCH = 0x2CA + SYS_MEMCCPY = 0x2D4 + SYS_MRAND48 = 0x2E8 + SYS_NRAND48 = 0x2E9 + SYS_PCLOSE = 0x2D2 + SYS_POPEN = 0x2D1 + SYS_PUTUTXLINE = 0x2E4 + SYS_RANDOM = 0x2C4 + SYS_REMQUE = 0x2D0 + SYS_RINDEX = 0x2BE + SYS_SEED48 = 0x2EC + SYS_SETKEY = 0x2AE + SYS_SETSTATE = 0x2C3 + SYS_SETUTXENT = 0x2DF + SYS_SRAND48 = 0x2EB + SYS_SRANDOM = 0x2C5 + SYS_STATVFS = 0x2B5 + SYS_STRCASECMP = 0x2BF + SYS_STRDUP = 0x2C0 + SYS_STRNCASECMP = 0x2C1 + SYS_SWAB = 0x2D3 + SYS_TDELETE = 0x2CB + SYS_TFIND = 0x2CC + SYS_TOASCII = 0x2EE + SYS_TSEARCH = 0x2CD + SYS_TWALK = 0x2CE + SYS_UALARM = 0x2F1 + SYS_USLEEP = 0x2F2 + SYS_WAIT3 = 0x2A7 + SYS_WAITID = 0x2A8 + SYS_Y1 = 0x02A + SYS___ATOE = 0x2DB + SYS___ATOE_L = 0x2DC + SYS___CATTRM = 0x2A9 + SYS___CNVBLK = 0x2AF + SYS___CRYTRM = 0x2B0 + SYS___DLGHT = 0x2A1 + SYS___ECRTRM = 0x2B1 + SYS___ETOA = 0x2DD + SYS___ETOA_L = 0x2DE + SYS___GDTRM = 0x2AA + SYS___OCLCK = 0x2DA + SYS___OPARGF = 0x2A2 + SYS___OPERRF = 0x2A5 + SYS___OPINDF = 0x2A4 + SYS___OPOPTF = 0x2A3 + SYS___RNDTRM = 0x2AB + SYS___SRCTRM = 0x2F4 + SYS___TZONE = 0x2A0 + SYS___UTXTRM = 0x2F3 + SYS_ASIN = 0x03E + SYS_ISXDIGIT = 0x03B + SYS_SETLOCAL = 0x03A + SYS_SETLOCALE = 0x03A + SYS_SIN = 0x03F + SYS_TOLOWER = 0x03C + SYS_TOUPPER = 0x03D + SYS_ACCEPT_AND_RECV = 0x4F7 + SYS_ATOL = 0x04E + SYS_CHECKSCH = 0x4BC + SYS_CHECKSCHENV = 0x4BC + SYS_CLEARERR = 0x04C + SYS_CONNECTS = 0x4B5 + SYS_CONNECTSERVER = 0x4B5 + SYS_CONNECTW = 0x4B4 + SYS_CONNECTWORKMGR = 0x4B4 + SYS_CONTINUE = 0x4B3 + SYS_CONTINUEWORKUNIT = 0x4B3 + SYS_COPYSIGN = 0x4C2 + SYS_CREATEWO = 0x4B2 + SYS_CREATEWORKUNIT = 0x4B2 + SYS_DELETEWO = 0x4B9 + SYS_DELETEWORKUNIT = 0x4B9 + SYS_DISCONNE = 0x4B6 + SYS_DISCONNECTSERVER = 0x4B6 + SYS_FEOF = 0x04D + SYS_FERROR = 0x04A + SYS_FINITE = 0x4C8 + SYS_GAMMA_R = 0x4E2 + SYS_JOINWORK = 0x4B7 + SYS_JOINWORKUNIT = 0x4B7 + SYS_LEAVEWOR = 0x4B8 + SYS_LEAVEWORKUNIT = 0x4B8 + SYS_LGAMMA_R = 0x4EB + SYS_MATHERR = 0x4D0 + SYS_PERROR = 0x04F + SYS_QUERYMET = 0x4BA + SYS_QUERYMETRICS = 0x4BA + SYS_QUERYSCH = 0x4BB + SYS_QUERYSCHENV = 0x4BB + SYS_REWIND = 0x04B + SYS_SCALBN = 0x4D4 + SYS_SIGNIFIC = 0x4D5 + SYS_SIGNIFICAND = 0x4D5 + SYS___ACOSH_B = 0x4DA + SYS___ACOS_B = 0x4D9 + SYS___ASINH_B = 0x4BE + SYS___ASIN_B = 0x4DB + SYS___ATAN2_B = 0x4DC + SYS___ATANH_B = 0x4DD + SYS___ATAN_B = 0x4BF + SYS___CBRT_B = 0x4C0 + SYS___CEIL_B = 0x4C1 + SYS___COSH_B = 0x4DE + SYS___COS_B = 0x4C3 + SYS___DGHT = 0x4A8 + SYS___ENVN = 0x4B0 + SYS___ERFC_B = 0x4C5 + SYS___ERF_B = 0x4C4 + SYS___EXPM1_B = 0x4C6 + SYS___EXP_B = 0x4DF + SYS___FABS_B = 0x4C7 + SYS___FLOOR_B = 0x4C9 + SYS___FMOD_B = 0x4E0 + SYS___FP_SETMODE = 0x4F8 + SYS___FREXP_B = 0x4CA + SYS___GAMMA_B = 0x4E1 + SYS___GDRR = 0x4A1 + SYS___HRRNO = 0x4A2 + SYS___HYPOT_B = 0x4E3 + SYS___ILOGB_B = 0x4CB + SYS___ISNAN_B = 0x4CC + SYS___J0_B = 0x4E4 + SYS___J1_B = 0x4E6 + SYS___JN_B = 0x4E8 + SYS___LDEXP_B = 0x4CD + SYS___LGAMMA_B = 0x4EA + SYS___LOG10_B = 0x4ED + SYS___LOG1P_B = 0x4CE + SYS___LOGB_B = 0x4CF + SYS___LOGIN = 0x4F5 + SYS___LOG_B = 0x4EC + SYS___MLOCKALL = 0x4B1 + SYS___MODF_B = 0x4D1 + SYS___NEXTAFTER_B = 0x4D2 + SYS___OPENDIR2 = 0x4F3 + SYS___OPEN_STAT = 0x4F6 + SYS___OPND = 0x4A5 + SYS___OPPT = 0x4A6 + SYS___OPRG = 0x4A3 + SYS___OPRR = 0x4A4 + 
SYS___PID_AFFINITY = 0x4BD + SYS___POW_B = 0x4EE + SYS___READDIR2 = 0x4F4 + SYS___REMAINDER_B = 0x4EF + SYS___RINT_B = 0x4D3 + SYS___SCALB_B = 0x4F0 + SYS___SIGACTIONSET = 0x4FB + SYS___SIGGM = 0x4A7 + SYS___SINH_B = 0x4F1 + SYS___SIN_B = 0x4D6 + SYS___SQRT_B = 0x4F2 + SYS___TANH_B = 0x4D8 + SYS___TAN_B = 0x4D7 + SYS___TRRNO = 0x4AF + SYS___TZNE = 0x4A9 + SYS___TZZN = 0x4AA + SYS___UCREATE = 0x4FC + SYS___UFREE = 0x4FE + SYS___UHEAPREPORT = 0x4FF + SYS___UMALLOC = 0x4FD + SYS___Y0_B = 0x4E5 + SYS___Y1_B = 0x4E7 + SYS___YN_B = 0x4E9 + SYS_ABORT = 0x05C + SYS_ASCTIME_R = 0x5E0 + SYS_ATEXIT = 0x05D + SYS_CONNECTE = 0x5AE + SYS_CONNECTEXPORTIMPORT = 0x5AE + SYS_CTIME_R = 0x5E1 + SYS_DN_COMP = 0x5DF + SYS_DN_EXPAND = 0x5DD + SYS_DN_SKIPNAME = 0x5DE + SYS_EXIT = 0x05A + SYS_EXPORTWO = 0x5A1 + SYS_EXPORTWORKUNIT = 0x5A1 + SYS_EXTRACTW = 0x5A5 + SYS_EXTRACTWORKUNIT = 0x5A5 + SYS_FSEEKO = 0x5C9 + SYS_FTELLO = 0x5C8 + SYS_GETGRGID_R = 0x5E7 + SYS_GETGRNAM_R = 0x5E8 + SYS_GETLOGIN_R = 0x5E9 + SYS_GETPWNAM_R = 0x5EA + SYS_GETPWUID_R = 0x5EB + SYS_GMTIME_R = 0x5E2 + SYS_IMPORTWO = 0x5A3 + SYS_IMPORTWORKUNIT = 0x5A3 + SYS_INET_NTOP = 0x5D3 + SYS_INET_PTON = 0x5D4 + SYS_LLABS = 0x5CE + SYS_LLDIV = 0x5CB + SYS_LOCALTIME_R = 0x5E3 + SYS_PTHREAD_ATFORK = 0x5ED + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 + SYS_PTHREAD_DETACH_U98 = 0x5FD + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE + SYS_PTHREAD_KEY_DELETE = 0x5F5 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 + SYS_PTHREAD_SIGMASK = 0x5F7 + SYS_QUERYENC = 0x5AD + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD + SYS_RAISE = 0x05E + SYS_RAND_R = 0x5E4 + SYS_READDIR_R = 0x5E6 + SYS_REALLOC = 0x05B + SYS_RES_INIT = 0x5D8 + SYS_RES_MKQUERY = 0x5D7 + SYS_RES_QUERY = 0x5D9 + SYS_RES_QUERYDOMAIN = 0x5DC + SYS_RES_SEARCH = 0x5DA + SYS_RES_SEND = 0x5DB + SYS_SETJMP = 0x05F + SYS_SIGQUEUE = 0x5A9 + SYS_STRTOK_R = 0x5E5 + SYS_STRTOLL = 0x5B0 + SYS_STRTOULL = 0x5B1 + SYS_TTYNAME_R = 0x5EC + SYS_UNDOEXPO = 0x5A2 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 + SYS_UNDOIMPO = 0x5A4 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 + SYS_WCSTOLL = 0x5CC + SYS_WCSTOULL = 0x5CD + SYS___ABORT = 0x05C + SYS___CONSOLE2 = 0x5D2 + SYS___CPL = 0x5A6 + SYS___DISCARDDATA = 0x5F8 + SYS___DSA_PREV = 0x5B2 + SYS___EP_FIND = 0x5B3 + SYS___FP_SWAPMODE = 0x5AF + SYS___GETUSERID = 0x5AB + SYS___GET_CPUID = 0x5B9 + SYS___GET_SYSTEM_SETTINGS = 0x5BA + SYS___IPDOMAINNAME = 0x5AC + SYS___MAP_INIT = 0x5A7 + SYS___MAP_SERVICE = 0x5A8 + SYS___MOUNT = 0x5AA + SYS___MSGRCV_TIMED = 0x5B7 + SYS___RES = 0x5D6 + SYS___SEMOP_TIMED = 0x5B8 + SYS___SERVER_THREADS_QUERY = 0x5B4 + SYS_FPRINTF = 0x06D + SYS_FSCANF = 0x06A + SYS_PRINTF = 0x06F + SYS_SETBUF = 0x06B + SYS_SETVBUF = 0x06C + SYS_SSCANF = 0x06E + SYS___CATGETS_A = 0x6C0 + SYS___CHAUDIT_A = 0x6F4 + SYS___CHMOD_A = 0x6E8 + SYS___COLLATE_INIT_A = 0x6AC + SYS___CREAT_A = 0x6F6 + SYS___CTYPE_INIT_A = 0x6AF + SYS___DLLLOAD_A = 0x6DF + SYS___DLLQUERYFN_A = 0x6E0 + SYS___DLLQUERYVAR_A = 0x6E1 + SYS___E2A_L = 0x6E3 + SYS___EXECLE_A = 0x6A0 + SYS___EXECLP_A = 0x6A4 + SYS___EXECVE_A = 0x6C1 + SYS___EXECVP_A = 0x6C2 + SYS___EXECV_A = 0x6B1 + SYS___FPRINTF_A = 0x6FA + 
SYS___GETADDRINFO_A = 0x6BF + SYS___GETNAMEINFO_A = 0x6C4 + SYS___GET_WCTYPE_STD_A = 0x6AE + SYS___ICONV_OPEN_A = 0x6DE + SYS___IF_INDEXTONAME_A = 0x6DC + SYS___IF_NAMETOINDEX_A = 0x6DB + SYS___ISWCTYPE_A = 0x6B0 + SYS___IS_WCTYPE_STD_A = 0x6B2 + SYS___LOCALECONV_A = 0x6B8 + SYS___LOCALECONV_STD_A = 0x6B9 + SYS___LOCALE_INIT_A = 0x6B7 + SYS___LSTAT_A = 0x6EE + SYS___LSTAT_O_A = 0x6EF + SYS___MKDIR_A = 0x6E9 + SYS___MKFIFO_A = 0x6EC + SYS___MKNOD_A = 0x6F0 + SYS___MONETARY_INIT_A = 0x6BC + SYS___MOUNT_A = 0x6F1 + SYS___NL_CSINFO_A = 0x6D6 + SYS___NL_LANGINFO_A = 0x6BA + SYS___NL_LNAGINFO_STD_A = 0x6BB + SYS___NL_MONINFO_A = 0x6D7 + SYS___NL_NUMINFO_A = 0x6D8 + SYS___NL_RESPINFO_A = 0x6D9 + SYS___NL_TIMINFO_A = 0x6DA + SYS___NUMERIC_INIT_A = 0x6C6 + SYS___OPEN_A = 0x6F7 + SYS___PRINTF_A = 0x6DD + SYS___RESP_INIT_A = 0x6C7 + SYS___RPMATCH_A = 0x6C8 + SYS___RPMATCH_C_A = 0x6C9 + SYS___RPMATCH_STD_A = 0x6CA + SYS___SETLOCALE_A = 0x6F9 + SYS___SPAWNP_A = 0x6C5 + SYS___SPAWN_A = 0x6C3 + SYS___SPRINTF_A = 0x6FB + SYS___STAT_A = 0x6EA + SYS___STAT_O_A = 0x6EB + SYS___STRCOLL_STD_A = 0x6A1 + SYS___STRFMON_A = 0x6BD + SYS___STRFMON_STD_A = 0x6BE + SYS___STRFTIME_A = 0x6CC + SYS___STRFTIME_STD_A = 0x6CD + SYS___STRPTIME_A = 0x6CE + SYS___STRPTIME_STD_A = 0x6CF + SYS___STRXFRM_A = 0x6A2 + SYS___STRXFRM_C_A = 0x6A3 + SYS___STRXFRM_STD_A = 0x6A5 + SYS___SYNTAX_INIT_A = 0x6D4 + SYS___TIME_INIT_A = 0x6CB + SYS___TOD_INIT_A = 0x6D5 + SYS___TOWLOWER_A = 0x6B3 + SYS___TOWLOWER_STD_A = 0x6B4 + SYS___TOWUPPER_A = 0x6B5 + SYS___TOWUPPER_STD_A = 0x6B6 + SYS___UMOUNT_A = 0x6F2 + SYS___VFPRINTF_A = 0x6FC + SYS___VPRINTF_A = 0x6FD + SYS___VSPRINTF_A = 0x6FE + SYS___VSWPRINTF_A = 0x6FF + SYS___WCSCOLL_A = 0x6A6 + SYS___WCSCOLL_C_A = 0x6A7 + SYS___WCSCOLL_STD_A = 0x6A8 + SYS___WCSFTIME_A = 0x6D0 + SYS___WCSFTIME_STD_A = 0x6D1 + SYS___WCSXFRM_A = 0x6A9 + SYS___WCSXFRM_C_A = 0x6AA + SYS___WCSXFRM_STD_A = 0x6AB + SYS___WCTYPE_A = 0x6AD + SYS___W_GETMNTENT_A = 0x6F5 + SYS_____CCSIDTYPE_A = 0x6E6 + SYS_____CHATTR_A = 0x6E2 + SYS_____CSNAMETYPE_A = 0x6E7 + SYS_____OPEN_STAT_A = 0x6ED + SYS_____SPAWN2_A = 0x6D2 + SYS_____SPAWNP2_A = 0x6D3 + SYS_____TOCCSID_A = 0x6E4 + SYS_____TOCSNAME_A = 0x6E5 + SYS_ACL_FREE = 0x7FF + SYS_ACL_INIT = 0x7FE + SYS_FWIDE = 0x7DF + SYS_FWPRINTF = 0x7D1 + SYS_FWRITE = 0x07E + SYS_FWSCANF = 0x7D5 + SYS_GETCHAR = 0x07B + SYS_GETS = 0x07C + SYS_M_CREATE_LAYOUT = 0x7C9 + SYS_M_DESTROY_LAYOUT = 0x7CA + SYS_M_GETVALUES_LAYOUT = 0x7CB + SYS_M_SETVALUES_LAYOUT = 0x7CC + SYS_M_TRANSFORM_LAYOUT = 0x7CD + SYS_M_WTRANSFORM_LAYOUT = 0x7CE + SYS_PREAD = 0x7C7 + SYS_PUTC = 0x07D + SYS_PUTCHAR = 0x07A + SYS_PUTS = 0x07F + SYS_PWRITE = 0x7C8 + SYS_TOWCTRAN = 0x7D8 + SYS_TOWCTRANS = 0x7D8 + SYS_UNATEXIT = 0x7B5 + SYS_VFWPRINT = 0x7D3 + SYS_VFWPRINTF = 0x7D3 + SYS_VWPRINTF = 0x7D4 + SYS_WCTRANS = 0x7D7 + SYS_WPRINTF = 0x7D2 + SYS_WSCANF = 0x7D6 + SYS___ASCTIME_R_A = 0x7A1 + SYS___BASENAME_A = 0x7DC + SYS___BTOWC_A = 0x7E4 + SYS___CDUMP_A = 0x7B7 + SYS___CEE3DMP_A = 0x7B6 + SYS___CEILF_H = 0x7F4 + SYS___CEILL_H = 0x7F5 + SYS___CEIL_H = 0x7EA + SYS___CRYPT_A = 0x7BE + SYS___CSNAP_A = 0x7B8 + SYS___CTEST_A = 0x7B9 + SYS___CTIME_R_A = 0x7A2 + SYS___CTRACE_A = 0x7BA + SYS___DBM_OPEN_A = 0x7E6 + SYS___DIRNAME_A = 0x7DD + SYS___FABSF_H = 0x7FA + SYS___FABSL_H = 0x7FB + SYS___FABS_H = 0x7ED + SYS___FGETWC_A = 0x7AA + SYS___FGETWS_A = 0x7AD + SYS___FLOORF_H = 0x7F6 + SYS___FLOORL_H = 0x7F7 + SYS___FLOOR_H = 0x7EB + SYS___FPUTWC_A = 0x7A5 + SYS___FPUTWS_A = 0x7A8 + SYS___GETTIMEOFDAY_A = 0x7AE + SYS___GETWCHAR_A = 0x7AC + 
SYS___GETWC_A = 0x7AB + SYS___GLOB_A = 0x7DE + SYS___GMTIME_A = 0x7AF + SYS___GMTIME_R_A = 0x7B0 + SYS___INET_PTON_A = 0x7BC + SYS___J0_H = 0x7EE + SYS___J1_H = 0x7EF + SYS___JN_H = 0x7F0 + SYS___LOCALTIME_A = 0x7B1 + SYS___LOCALTIME_R_A = 0x7B2 + SYS___MALLOC24 = 0x7FC + SYS___MALLOC31 = 0x7FD + SYS___MKTIME_A = 0x7B3 + SYS___MODFF_H = 0x7F8 + SYS___MODFL_H = 0x7F9 + SYS___MODF_H = 0x7EC + SYS___OPENDIR_A = 0x7C2 + SYS___OSNAME = 0x7E0 + SYS___PUTWCHAR_A = 0x7A7 + SYS___PUTWC_A = 0x7A6 + SYS___READDIR_A = 0x7C3 + SYS___STRTOLL_A = 0x7A3 + SYS___STRTOULL_A = 0x7A4 + SYS___SYSLOG_A = 0x7BD + SYS___TZZNA = 0x7B4 + SYS___UNGETWC_A = 0x7A9 + SYS___UTIME_A = 0x7A0 + SYS___VFPRINTF2_A = 0x7E7 + SYS___VPRINTF2_A = 0x7E8 + SYS___VSPRINTF2_A = 0x7E9 + SYS___VSWPRNTF2_A = 0x7BB + SYS___WCSTOD_A = 0x7D9 + SYS___WCSTOL_A = 0x7DA + SYS___WCSTOUL_A = 0x7DB + SYS___WCTOB_A = 0x7E5 + SYS___Y0_H = 0x7F1 + SYS___Y1_H = 0x7F2 + SYS___YN_H = 0x7F3 + SYS_____OPENDIR2_A = 0x7BF + SYS_____OSNAME_A = 0x7E1 + SYS_____READDIR2_A = 0x7C0 + SYS_DLCLOSE = 0x8DF + SYS_DLERROR = 0x8E0 + SYS_DLOPEN = 0x8DD + SYS_DLSYM = 0x8DE + SYS_FLOCKFILE = 0x8D3 + SYS_FTRYLOCKFILE = 0x8D4 + SYS_FUNLOCKFILE = 0x8D5 + SYS_GETCHAR_UNLOCKED = 0x8D7 + SYS_GETC_UNLOCKED = 0x8D6 + SYS_PUTCHAR_UNLOCKED = 0x8D9 + SYS_PUTC_UNLOCKED = 0x8D8 + SYS_SNPRINTF = 0x8DA + SYS_VSNPRINTF = 0x8DB + SYS_WCSCSPN = 0x08B + SYS_WCSLEN = 0x08C + SYS_WCSNCAT = 0x08D + SYS_WCSNCMP = 0x08A + SYS_WCSNCPY = 0x08F + SYS_WCSSPN = 0x08E + SYS___ABSF_H = 0x8E7 + SYS___ABSL_H = 0x8E8 + SYS___ABS_H = 0x8E6 + SYS___ACOSF_H = 0x8EA + SYS___ACOSH_H = 0x8EC + SYS___ACOSL_H = 0x8EB + SYS___ACOS_H = 0x8E9 + SYS___ASINF_H = 0x8EE + SYS___ASINH_H = 0x8F0 + SYS___ASINL_H = 0x8EF + SYS___ASIN_H = 0x8ED + SYS___ATAN2F_H = 0x8F8 + SYS___ATAN2L_H = 0x8F9 + SYS___ATAN2_H = 0x8F7 + SYS___ATANF_H = 0x8F2 + SYS___ATANHF_H = 0x8F5 + SYS___ATANHL_H = 0x8F6 + SYS___ATANH_H = 0x8F4 + SYS___ATANL_H = 0x8F3 + SYS___ATAN_H = 0x8F1 + SYS___CBRT_H = 0x8FA + SYS___COPYSIGNF_H = 0x8FB + SYS___COPYSIGNL_H = 0x8FC + SYS___COSF_H = 0x8FE + SYS___COSL_H = 0x8FF + SYS___COS_H = 0x8FD + SYS___DLERROR_A = 0x8D2 + SYS___DLOPEN_A = 0x8D0 + SYS___DLSYM_A = 0x8D1 + SYS___GETUTXENT_A = 0x8C6 + SYS___GETUTXID_A = 0x8C7 + SYS___GETUTXLINE_A = 0x8C8 + SYS___ITOA = 0x8AA + SYS___ITOA_A = 0x8B0 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 + SYS___LE_MSG_ADD_INSERT = 0x8A6 + SYS___LE_MSG_GET = 0x8A7 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 + SYS___LE_MSG_WRITE = 0x8A9 + SYS___LLTOA = 0x8AE + SYS___LLTOA_A = 0x8B4 + SYS___LTOA = 0x8AC + SYS___LTOA_A = 0x8B2 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC + SYS___PUTC_UNLOCKED_A = 0x8CB + SYS___PUTUTXLINE_A = 0x8C9 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 + SYS___REXEC_A = 0x8C4 + SYS___REXEC_AF_A = 0x8C5 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 + SYS___SNPRINTF_A = 0x8CD + SYS___SUPERKILL = 0x8A4 + SYS___TCGETATTR_A = 0x8A1 + SYS___TCSETATTR_A = 0x8A2 + SYS___ULLTOA = 0x8AF + SYS___ULLTOA_A = 0x8B5 + SYS___ULTOA = 0x8AD + SYS___ULTOA_A = 0x8B3 + SYS___UTOA = 0x8AB + SYS___UTOA_A = 0x8B1 + SYS___VHM_EVENT = 0x8E4 + SYS___VSNPRINTF_A = 0x8CE + SYS_____GETENV_A = 0x8C3 + SYS_____UTMPXNAME_A = 0x8CA + SYS_CACOSH = 0x9A0 + SYS_CACOSHF = 0x9A3 + SYS_CACOSHL = 0x9A6 + SYS_CARG = 0x9A9 + SYS_CARGF = 0x9AC + SYS_CARGL = 0x9AF + SYS_CASIN = 0x9B2 + SYS_CASINF = 0x9B5 + SYS_CASINH = 0x9BB + SYS_CASINHF = 0x9BE + SYS_CASINHL = 0x9C1 + SYS_CASINL = 0x9B8 + SYS_CATAN = 0x9C4 + SYS_CATANF = 0x9C7 + SYS_CATANH = 0x9CD + SYS_CATANHF = 0x9D0 + SYS_CATANHL = 0x9D3 + SYS_CATANL = 0x9CA + SYS_CCOS = 0x9D6 + 
SYS_CCOSF = 0x9D9 + SYS_CCOSH = 0x9DF + SYS_CCOSHF = 0x9E2 + SYS_CCOSHL = 0x9E5 + SYS_CCOSL = 0x9DC + SYS_CEXP = 0x9E8 + SYS_CEXPF = 0x9EB + SYS_CEXPL = 0x9EE + SYS_CIMAG = 0x9F1 + SYS_CIMAGF = 0x9F4 + SYS_CIMAGL = 0x9F7 + SYS_CLOGF = 0x9FD + SYS_MEMCHR = 0x09B + SYS_MEMCMP = 0x09A + SYS_STRCOLL = 0x09C + SYS_STRNCMP = 0x09D + SYS_STRRCHR = 0x09F + SYS_STRXFRM = 0x09E + SYS___CACOSHF_B = 0x9A4 + SYS___CACOSHF_H = 0x9A5 + SYS___CACOSHL_B = 0x9A7 + SYS___CACOSHL_H = 0x9A8 + SYS___CACOSH_B = 0x9A1 + SYS___CACOSH_H = 0x9A2 + SYS___CARGF_B = 0x9AD + SYS___CARGF_H = 0x9AE + SYS___CARGL_B = 0x9B0 + SYS___CARGL_H = 0x9B1 + SYS___CARG_B = 0x9AA + SYS___CARG_H = 0x9AB + SYS___CASINF_B = 0x9B6 + SYS___CASINF_H = 0x9B7 + SYS___CASINHF_B = 0x9BF + SYS___CASINHF_H = 0x9C0 + SYS___CASINHL_B = 0x9C2 + SYS___CASINHL_H = 0x9C3 + SYS___CASINH_B = 0x9BC + SYS___CASINH_H = 0x9BD + SYS___CASINL_B = 0x9B9 + SYS___CASINL_H = 0x9BA + SYS___CASIN_B = 0x9B3 + SYS___CASIN_H = 0x9B4 + SYS___CATANF_B = 0x9C8 + SYS___CATANF_H = 0x9C9 + SYS___CATANHF_B = 0x9D1 + SYS___CATANHF_H = 0x9D2 + SYS___CATANHL_B = 0x9D4 + SYS___CATANHL_H = 0x9D5 + SYS___CATANH_B = 0x9CE + SYS___CATANH_H = 0x9CF + SYS___CATANL_B = 0x9CB + SYS___CATANL_H = 0x9CC + SYS___CATAN_B = 0x9C5 + SYS___CATAN_H = 0x9C6 + SYS___CCOSF_B = 0x9DA + SYS___CCOSF_H = 0x9DB + SYS___CCOSHF_B = 0x9E3 + SYS___CCOSHF_H = 0x9E4 + SYS___CCOSHL_B = 0x9E6 + SYS___CCOSHL_H = 0x9E7 + SYS___CCOSH_B = 0x9E0 + SYS___CCOSH_H = 0x9E1 + SYS___CCOSL_B = 0x9DD + SYS___CCOSL_H = 0x9DE + SYS___CCOS_B = 0x9D7 + SYS___CCOS_H = 0x9D8 + SYS___CEXPF_B = 0x9EC + SYS___CEXPF_H = 0x9ED + SYS___CEXPL_B = 0x9EF + SYS___CEXPL_H = 0x9F0 + SYS___CEXP_B = 0x9E9 + SYS___CEXP_H = 0x9EA + SYS___CIMAGF_B = 0x9F5 + SYS___CIMAGF_H = 0x9F6 + SYS___CIMAGL_B = 0x9F8 + SYS___CIMAGL_H = 0x9F9 + SYS___CIMAG_B = 0x9F2 + SYS___CIMAG_H = 0x9F3 + SYS___CLOG = 0x9FA + SYS___CLOGF_B = 0x9FE + SYS___CLOGF_H = 0x9FF + SYS___CLOG_B = 0x9FB + SYS___CLOG_H = 0x9FC + SYS_ISWCTYPE = 0x10C + SYS_ISWXDIGI = 0x10A + SYS_ISWXDIGIT = 0x10A + SYS_MBSINIT = 0x10F + SYS_TOWLOWER = 0x10D + SYS_TOWUPPER = 0x10E + SYS_WCTYPE = 0x10B + SYS_WCSSTR = 0x11B + SYS___RPMTCH = 0x11A + SYS_WCSTOD = 0x12E + SYS_WCSTOK = 0x12C + SYS_WCSTOL = 0x12D + SYS_WCSTOUL = 0x12F + SYS_FGETWC = 0x13C + SYS_FGETWS = 0x13D + SYS_FPUTWC = 0x13E + SYS_FPUTWS = 0x13F + SYS_REGERROR = 0x13B + SYS_REGFREE = 0x13A + SYS_COLLEQUIV = 0x14F + SYS_COLLTOSTR = 0x14E + SYS_ISMCCOLLEL = 0x14C + SYS_STRTOCOLL = 0x14D + SYS_DLLFREE = 0x16F + SYS_DLLQUERYFN = 0x16D + SYS_DLLQUERYVAR = 0x16E + SYS_GETMCCOLL = 0x16A + SYS_GETWMCCOLL = 0x16B + SYS___ERR2AD = 0x16C + SYS_CFSETOSPEED = 0x17A + SYS_CHDIR = 0x17B + SYS_CHMOD = 0x17C + SYS_CHOWN = 0x17D + SYS_CLOSE = 0x17E + SYS_CLOSEDIR = 0x17F + SYS_LOG = 0x017 + SYS_COSH = 0x018 + SYS_FCHMOD = 0x18A + SYS_FCHOWN = 0x18B + SYS_FCNTL = 0x18C + SYS_FILENO = 0x18D + SYS_FORK = 0x18E + SYS_FPATHCONF = 0x18F + SYS_GETLOGIN = 0x19A + SYS_GETPGRP = 0x19C + SYS_GETPID = 0x19D + SYS_GETPPID = 0x19E + SYS_GETPWNAM = 0x19F + SYS_TANH = 0x019 + SYS_W_GETMNTENT = 0x19B + SYS_POW = 0x020 + SYS_PTHREAD_SELF = 0x20A + SYS_PTHREAD_SETINTR = 0x20B + SYS_PTHREAD_SETINTRTYPE = 0x20C + SYS_PTHREAD_SETSPECIFIC = 0x20D + SYS_PTHREAD_TESTINTR = 0x20E + SYS_PTHREAD_YIELD = 0x20F + SYS_SQRT = 0x021 + SYS_FLOOR = 0x022 + SYS_J1 = 0x023 + SYS_WCSPBRK = 0x23F + SYS_BSEARCH = 0x24C + SYS_FABS = 0x024 + SYS_GETENV = 0x24A + SYS_LDIV = 0x24D + SYS_SYSTEM = 0x24B + SYS_FMOD = 0x025 + SYS___RETHROW = 0x25F + SYS___THROW = 0x25E + SYS_J0 = 0x026 + SYS_PUTENV = 
0x26A + SYS___GETENV = 0x26F + SYS_SEMCTL = 0x27A + SYS_SEMGET = 0x27B + SYS_SEMOP = 0x27C + SYS_SHMAT = 0x27D + SYS_SHMCTL = 0x27E + SYS_SHMDT = 0x27F + SYS_YN = 0x027 + SYS_JN = 0x028 + SYS_SIGALTSTACK = 0x28A + SYS_SIGHOLD = 0x28B + SYS_SIGIGNORE = 0x28C + SYS_SIGINTERRUPT = 0x28D + SYS_SIGPAUSE = 0x28E + SYS_SIGRELSE = 0x28F + SYS_GETOPT = 0x29A + SYS_GETSUBOPT = 0x29D + SYS_LCHOWN = 0x29B + SYS_SETPGRP = 0x29E + SYS_TRUNCATE = 0x29C + SYS_Y0 = 0x029 + SYS___GDERR = 0x29F + SYS_ISALPHA = 0x030 + SYS_VFORK = 0x30F + SYS__LONGJMP = 0x30D + SYS__SETJMP = 0x30E + SYS_GLOB = 0x31A + SYS_GLOBFREE = 0x31B + SYS_ISALNUM = 0x031 + SYS_PUTW = 0x31C + SYS_SEEKDIR = 0x31D + SYS_TELLDIR = 0x31E + SYS_TEMPNAM = 0x31F + SYS_GETTIMEOFDAY_R = 0x32E + SYS_ISLOWER = 0x032 + SYS_LGAMMA = 0x32C + SYS_REMAINDER = 0x32A + SYS_SCALB = 0x32B + SYS_SYNC = 0x32F + SYS_TTYSLOT = 0x32D + SYS_ENDPROTOENT = 0x33A + SYS_ENDSERVENT = 0x33B + SYS_GETHOSTBYADDR = 0x33D + SYS_GETHOSTBYADDR_R = 0x33C + SYS_GETHOSTBYNAME = 0x33F + SYS_GETHOSTBYNAME_R = 0x33E + SYS_ISCNTRL = 0x033 + SYS_GETSERVBYNAME = 0x34A + SYS_GETSERVBYPORT = 0x34B + SYS_GETSERVENT = 0x34C + SYS_GETSOCKNAME = 0x34D + SYS_GETSOCKOPT = 0x34E + SYS_INET_ADDR = 0x34F + SYS_ISDIGIT = 0x034 + SYS_ISGRAPH = 0x035 + SYS_SELECT = 0x35B + SYS_SELECTEX = 0x35C + SYS_SEND = 0x35D + SYS_SENDTO = 0x35F + SYS_CHROOT = 0x36A + SYS_ISNAN = 0x36D + SYS_ISUPPER = 0x036 + SYS_ULIMIT = 0x36C + SYS_UTIMES = 0x36E + SYS_W_STATVFS = 0x36B + SYS___H_ERRNO = 0x36F + SYS_GRANTPT = 0x37A + SYS_ISPRINT = 0x037 + SYS_TCGETSID = 0x37C + SYS_UNLOCKPT = 0x37B + SYS___TCGETCP = 0x37D + SYS___TCSETCP = 0x37E + SYS___TCSETTABLES = 0x37F + SYS_ISPUNCT = 0x038 + SYS_NLIST = 0x38C + SYS___IPDBCS = 0x38D + SYS___IPDSPX = 0x38E + SYS___IPMSGC = 0x38F + SYS___STHOSTENT = 0x38B + SYS___STSERVENT = 0x38A + SYS_ISSPACE = 0x039 + SYS_COS = 0x040 + SYS_T_ALLOC = 0x40A + SYS_T_BIND = 0x40B + SYS_T_CLOSE = 0x40C + SYS_T_CONNECT = 0x40D + SYS_T_ERROR = 0x40E + SYS_T_FREE = 0x40F + SYS_TAN = 0x041 + SYS_T_RCVREL = 0x41A + SYS_T_RCVUDATA = 0x41B + SYS_T_RCVUDERR = 0x41C + SYS_T_SND = 0x41D + SYS_T_SNDDIS = 0x41E + SYS_T_SNDREL = 0x41F + SYS_GETPMSG = 0x42A + SYS_ISASTREAM = 0x42B + SYS_PUTMSG = 0x42C + SYS_PUTPMSG = 0x42D + SYS_SINH = 0x042 + SYS___ISPOSIXON = 0x42E + SYS___OPENMVSREL = 0x42F + SYS_ACOS = 0x043 + SYS_ATAN = 0x044 + SYS_ATAN2 = 0x045 + SYS_FTELL = 0x046 + SYS_FGETPOS = 0x047 + SYS_SOCK_DEBUG = 0x47A + SYS_SOCK_DO_TESTSTOR = 0x47D + SYS_TAKESOCKET = 0x47E + SYS___SERVER_INIT = 0x47F + SYS_FSEEK = 0x048 + SYS___IPHOST = 0x48B + SYS___IPNODE = 0x48C + SYS___SERVER_CLASSIFY_CREATE = 0x48D + SYS___SERVER_CLASSIFY_DESTROY = 0x48E + SYS___SERVER_CLASSIFY_RESET = 0x48F + SYS___SMF_RECORD = 0x48A + SYS_FSETPOS = 0x049 + SYS___FNWSA = 0x49B + SYS___SPAWN2 = 0x49D + SYS___SPAWNP2 = 0x49E + SYS_ATOF = 0x050 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C + SYS_PTHREAD_RWLOCK_INIT = 0x50D + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F + SYS_ATOI = 0x051 + SYS___FP_CLASS = 0x51D + SYS___FP_CLR_FLAG = 0x51A + SYS___FP_FINITE = 0x51E + SYS___FP_ISNAN = 0x51F + SYS___FP_RAISE_XCP = 0x51C + SYS___FP_READ_FLAG = 0x51B + SYS_RAND = 0x052 + SYS_SIGTIMEDWAIT = 0x52D + SYS_SIGWAITINFO = 0x52E + SYS___CHKBFP = 0x52F + SYS___FPC_RS = 0x52C + SYS___FPC_RW = 0x52A + SYS___FPC_SM = 0x52B + SYS_STRTOD = 0x053 + SYS_STRTOL = 0x054 + SYS_STRTOUL = 0x055 + SYS_MALLOC = 0x056 + SYS_SRAND = 0x057 + SYS_CALLOC = 0x058 + 
SYS_FREE = 0x059 + SYS___OSENV = 0x59F + SYS___W_PIOCTL = 0x59E + SYS_LONGJMP = 0x060 + SYS___FLOORF_B = 0x60A + SYS___FLOORL_B = 0x60B + SYS___FREXPF_B = 0x60C + SYS___FREXPL_B = 0x60D + SYS___LDEXPF_B = 0x60E + SYS___LDEXPL_B = 0x60F + SYS_SIGNAL = 0x061 + SYS___ATAN2F_B = 0x61A + SYS___ATAN2L_B = 0x61B + SYS___COSHF_B = 0x61C + SYS___COSHL_B = 0x61D + SYS___EXPF_B = 0x61E + SYS___EXPL_B = 0x61F + SYS_TMPNAM = 0x062 + SYS___ABSF_B = 0x62A + SYS___ABSL_B = 0x62C + SYS___ABS_B = 0x62B + SYS___FMODF_B = 0x62D + SYS___FMODL_B = 0x62E + SYS___MODFF_B = 0x62F + SYS_ATANL = 0x63A + SYS_CEILF = 0x63B + SYS_CEILL = 0x63C + SYS_COSF = 0x63D + SYS_COSHF = 0x63F + SYS_COSL = 0x63E + SYS_REMOVE = 0x063 + SYS_POWL = 0x64A + SYS_RENAME = 0x064 + SYS_SINF = 0x64B + SYS_SINHF = 0x64F + SYS_SINL = 0x64C + SYS_SQRTF = 0x64D + SYS_SQRTL = 0x64E + SYS_BTOWC = 0x65F + SYS_FREXPL = 0x65A + SYS_LDEXPF = 0x65B + SYS_LDEXPL = 0x65C + SYS_MODFF = 0x65D + SYS_MODFL = 0x65E + SYS_TMPFILE = 0x065 + SYS_FREOPEN = 0x066 + SYS___CHARMAP_INIT_A = 0x66E + SYS___GETHOSTBYADDR_R_A = 0x66C + SYS___GETHOSTBYNAME_A = 0x66A + SYS___GETHOSTBYNAME_R_A = 0x66D + SYS___MBLEN_A = 0x66F + SYS___RES_INIT_A = 0x66B + SYS_FCLOSE = 0x067 + SYS___GETGRGID_R_A = 0x67D + SYS___WCSTOMBS_A = 0x67A + SYS___WCSTOMBS_STD_A = 0x67B + SYS___WCSWIDTH_A = 0x67C + SYS___WCSWIDTH_ASIA = 0x67F + SYS___WCSWIDTH_STD_A = 0x67E + SYS_FFLUSH = 0x068 + SYS___GETLOGIN_R_A = 0x68E + SYS___GETPWNAM_R_A = 0x68C + SYS___GETPWUID_R_A = 0x68D + SYS___TTYNAME_R_A = 0x68F + SYS___WCWIDTH_ASIA = 0x68B + SYS___WCWIDTH_STD_A = 0x68A + SYS_FOPEN = 0x069 + SYS___REGEXEC_A = 0x69A + SYS___REGEXEC_STD_A = 0x69B + SYS___REGFREE_A = 0x69C + SYS___REGFREE_STD_A = 0x69D + SYS___STRCOLL_A = 0x69E + SYS___STRCOLL_C_A = 0x69F + SYS_SCANF = 0x070 + SYS___A64L_A = 0x70C + SYS___ECVT_A = 0x70D + SYS___FCVT_A = 0x70E + SYS___GCVT_A = 0x70F + SYS___STRTOUL_A = 0x70A + SYS_____AE_CORRESTBL_QUERY_A = 0x70B + SYS_SPRINTF = 0x071 + SYS___ACCESS_A = 0x71F + SYS___CATOPEN_A = 0x71E + SYS___GETOPT_A = 0x71D + SYS___REALPATH_A = 0x71A + SYS___SETENV_A = 0x71B + SYS___SYSTEM_A = 0x71C + SYS_FGETC = 0x072 + SYS___GAI_STRERROR_A = 0x72F + SYS___RMDIR_A = 0x72A + SYS___STATVFS_A = 0x72B + SYS___SYMLINK_A = 0x72C + SYS___TRUNCATE_A = 0x72D + SYS___UNLINK_A = 0x72E + SYS_VFPRINTF = 0x073 + SYS___ISSPACE_A = 0x73A + SYS___ISUPPER_A = 0x73B + SYS___ISWALNUM_A = 0x73F + SYS___ISXDIGIT_A = 0x73C + SYS___TOLOWER_A = 0x73D + SYS___TOUPPER_A = 0x73E + SYS_VPRINTF = 0x074 + SYS___CONFSTR_A = 0x74B + SYS___FDOPEN_A = 0x74E + SYS___FLDATA_A = 0x74F + SYS___FTOK_A = 0x74C + SYS___ISWXDIGIT_A = 0x74A + SYS___MKTEMP_A = 0x74D + SYS_VSPRINTF = 0x075 + SYS___GETGRGID_A = 0x75A + SYS___GETGRNAM_A = 0x75B + SYS___GETGROUPSBYNAME_A = 0x75C + SYS___GETHOSTENT_A = 0x75D + SYS___GETHOSTNAME_A = 0x75E + SYS___GETLOGIN_A = 0x75F + SYS_GETC = 0x076 + SYS___CREATEWORKUNIT_A = 0x76A + SYS___CTERMID_A = 0x76B + SYS___FMTMSG_A = 0x76C + SYS___INITGROUPS_A = 0x76D + SYS___MSGRCV_A = 0x76F + SYS_____LOGIN_A = 0x76E + SYS_FGETS = 0x077 + SYS___STRCASECMP_A = 0x77B + SYS___STRNCASECMP_A = 0x77C + SYS___TTYNAME_A = 0x77D + SYS___UNAME_A = 0x77E + SYS___UTIMES_A = 0x77F + SYS_____SERVER_PWU_A = 0x77A + SYS_FPUTC = 0x078 + SYS___CREAT_O_A = 0x78E + SYS___ENVNA = 0x78F + SYS___FREAD_A = 0x78A + SYS___FWRITE_A = 0x78B + SYS___ISASCII = 0x78D + SYS___OPEN_O_A = 0x78C + SYS_FPUTS = 0x079 + SYS___ASCTIME_A = 0x79C + SYS___CTIME_A = 0x79D + SYS___GETDATE_A = 0x79E + SYS___GETSERVBYPORT_A = 0x79A + SYS___GETSERVENT_A = 0x79B + SYS___TZSET_A 
= 0x79F + SYS_ACL_FROM_TEXT = 0x80C + SYS_ACL_SET_FD = 0x80A + SYS_ACL_SET_FILE = 0x80B + SYS_ACL_SORT = 0x80E + SYS_ACL_TO_TEXT = 0x80D + SYS_UNGETC = 0x080 + SYS___SHUTDOWN_REGISTRATION = 0x80F + SYS_FREAD = 0x081 + SYS_FREEADDRINFO = 0x81A + SYS_GAI_STRERROR = 0x81B + SYS_REXEC_AF = 0x81C + SYS___DYNALLOC_A = 0x81F + SYS___POE = 0x81D + SYS_WCSTOMBS = 0x082 + SYS___INET_ADDR_A = 0x82F + SYS___NLIST_A = 0x82A + SYS_____TCGETCP_A = 0x82B + SYS_____TCSETCP_A = 0x82C + SYS_____W_PIOCTL_A = 0x82E + SYS_MBTOWC = 0x083 + SYS___CABEND = 0x83D + SYS___LE_CIB_GET = 0x83E + SYS___RECVMSG_A = 0x83B + SYS___SENDMSG_A = 0x83A + SYS___SET_LAA_FOR_JIT = 0x83F + SYS_____LCHATTR_A = 0x83C + SYS_WCTOMB = 0x084 + SYS___CBRTL_B = 0x84A + SYS___COPYSIGNF_B = 0x84B + SYS___COPYSIGNL_B = 0x84C + SYS___COTANF_B = 0x84D + SYS___COTANL_B = 0x84F + SYS___COTAN_B = 0x84E + SYS_MBSTOWCS = 0x085 + SYS___LOG1PL_B = 0x85A + SYS___LOG2F_B = 0x85B + SYS___LOG2L_B = 0x85D + SYS___LOG2_B = 0x85C + SYS___REMAINDERF_B = 0x85E + SYS___REMAINDERL_B = 0x85F + SYS_ACOSHF = 0x86E + SYS_ACOSHL = 0x86F + SYS_WCSCPY = 0x086 + SYS___ERFCF_B = 0x86D + SYS___ERFF_B = 0x86C + SYS___LROUNDF_B = 0x86A + SYS___LROUND_B = 0x86B + SYS_COTANL = 0x87A + SYS_EXP2F = 0x87B + SYS_EXP2L = 0x87C + SYS_EXPM1F = 0x87D + SYS_EXPM1L = 0x87E + SYS_FDIMF = 0x87F + SYS_WCSCAT = 0x087 + SYS___COTANL = 0x87A + SYS_REMAINDERF = 0x88A + SYS_REMAINDERL = 0x88B + SYS_REMAINDF = 0x88A + SYS_REMAINDL = 0x88B + SYS_REMQUO = 0x88D + SYS_REMQUOF = 0x88C + SYS_REMQUOL = 0x88E + SYS_TGAMMAF = 0x88F + SYS_WCSCHR = 0x088 + SYS_ERFCF = 0x89B + SYS_ERFCL = 0x89C + SYS_ERFL = 0x89A + SYS_EXP2 = 0x89E + SYS_WCSCMP = 0x089 + SYS___EXP2_B = 0x89D + SYS___FAR_JUMP = 0x89F + SYS_ABS = 0x090 + SYS___ERFCL_H = 0x90A + SYS___EXPF_H = 0x90C + SYS___EXPL_H = 0x90D + SYS___EXPM1_H = 0x90E + SYS___EXP_H = 0x90B + SYS___FDIM_H = 0x90F + SYS_DIV = 0x091 + SYS___LOG2F_H = 0x91F + SYS___LOG2_H = 0x91E + SYS___LOGB_H = 0x91D + SYS___LOGF_H = 0x91B + SYS___LOGL_H = 0x91C + SYS___LOG_H = 0x91A + SYS_LABS = 0x092 + SYS___POWL_H = 0x92A + SYS___REMAINDER_H = 0x92B + SYS___RINT_H = 0x92C + SYS___SCALB_H = 0x92D + SYS___SINF_H = 0x92F + SYS___SIN_H = 0x92E + SYS_STRNCPY = 0x093 + SYS___TANHF_H = 0x93B + SYS___TANHL_H = 0x93C + SYS___TANH_H = 0x93A + SYS___TGAMMAF_H = 0x93E + SYS___TGAMMA_H = 0x93D + SYS___TRUNC_H = 0x93F + SYS_MEMCPY = 0x094 + SYS_VFWSCANF = 0x94A + SYS_VSWSCANF = 0x94E + SYS_VWSCANF = 0x94C + SYS_INET6_RTH_ADD = 0x95D + SYS_INET6_RTH_INIT = 0x95C + SYS_INET6_RTH_REVERSE = 0x95E + SYS_INET6_RTH_SEGMENTS = 0x95F + SYS_INET6_RTH_SPACE = 0x95B + SYS_MEMMOVE = 0x095 + SYS_WCSTOLD = 0x95A + SYS_STRCPY = 0x096 + SYS_STRCMP = 0x097 + SYS_CABS = 0x98E + SYS_STRCAT = 0x098 + SYS___CABS_B = 0x98F + SYS___POW_II = 0x98A + SYS___POW_II_B = 0x98B + SYS___POW_II_H = 0x98C + SYS_CACOSF = 0x99A + SYS_CACOSL = 0x99D + SYS_STRNCAT = 0x099 + SYS___CACOSF_B = 0x99B + SYS___CACOSF_H = 0x99C + SYS___CACOSL_B = 0x99E + SYS___CACOSL_H = 0x99F + SYS_ISWALPHA = 0x100 + SYS_ISWBLANK = 0x101 + SYS___ISWBLK = 0x101 + SYS_ISWCNTRL = 0x102 + SYS_ISWDIGIT = 0x103 + SYS_ISWGRAPH = 0x104 + SYS_ISWLOWER = 0x105 + SYS_ISWPRINT = 0x106 + SYS_ISWPUNCT = 0x107 + SYS_ISWSPACE = 0x108 + SYS_ISWUPPER = 0x109 + SYS_WCTOB = 0x110 + SYS_MBRLEN = 0x111 + SYS_MBRTOWC = 0x112 + SYS_MBSRTOWC = 0x113 + SYS_MBSRTOWCS = 0x113 + SYS_WCRTOMB = 0x114 + SYS_WCSRTOMB = 0x115 + SYS_WCSRTOMBS = 0x115 + SYS___CSID = 0x116 + SYS___WCSID = 0x117 + SYS_STRPTIME = 0x118 + SYS___STRPTM = 0x118 + SYS_STRFMON = 0x119 + SYS_WCSCOLL = 0x130 + 
SYS_WCSXFRM = 0x131 + SYS_WCSWIDTH = 0x132 + SYS_WCWIDTH = 0x133 + SYS_WCSFTIME = 0x134 + SYS_SWPRINTF = 0x135 + SYS_VSWPRINT = 0x136 + SYS_VSWPRINTF = 0x136 + SYS_SWSCANF = 0x137 + SYS_REGCOMP = 0x138 + SYS_REGEXEC = 0x139 + SYS_GETWC = 0x140 + SYS_GETWCHAR = 0x141 + SYS_PUTWC = 0x142 + SYS_PUTWCHAR = 0x143 + SYS_UNGETWC = 0x144 + SYS_ICONV_OPEN = 0x145 + SYS_ICONV = 0x146 + SYS_ICONV_CLOSE = 0x147 + SYS_COLLRANGE = 0x150 + SYS_CCLASS = 0x151 + SYS_COLLORDER = 0x152 + SYS___DEMANGLE = 0x154 + SYS_FDOPEN = 0x155 + SYS___ERRNO = 0x156 + SYS___ERRNO2 = 0x157 + SYS___TERROR = 0x158 + SYS_MAXCOLL = 0x169 + SYS_DLLLOAD = 0x170 + SYS__EXIT = 0x174 + SYS_ACCESS = 0x175 + SYS_ALARM = 0x176 + SYS_CFGETISPEED = 0x177 + SYS_CFGETOSPEED = 0x178 + SYS_CFSETISPEED = 0x179 + SYS_CREAT = 0x180 + SYS_CTERMID = 0x181 + SYS_DUP = 0x182 + SYS_DUP2 = 0x183 + SYS_EXECL = 0x184 + SYS_EXECLE = 0x185 + SYS_EXECLP = 0x186 + SYS_EXECV = 0x187 + SYS_EXECVE = 0x188 + SYS_EXECVP = 0x189 + SYS_FSTAT = 0x190 + SYS_FSYNC = 0x191 + SYS_FTRUNCATE = 0x192 + SYS_GETCWD = 0x193 + SYS_GETEGID = 0x194 + SYS_GETEUID = 0x195 + SYS_GETGID = 0x196 + SYS_GETGRGID = 0x197 + SYS_GETGRNAM = 0x198 + SYS_GETGROUPS = 0x199 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 + SYS_PTHREAD_MUTEX_INIT = 0x203 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 + SYS_PTHREAD_MUTEX_LOCK = 0x205 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 + SYS_PTHREAD_ONCE = 0x209 + SYS_TW_OPEN = 0x210 + SYS_TW_FCNTL = 0x211 + SYS_PTHREAD_JOIN_D4_NP = 0x212 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 + SYS_EXTLINK_NP = 0x215 + SYS___PASSWD = 0x216 + SYS_SETGROUPS = 0x217 + SYS_INITGROUPS = 0x218 + SYS_WCSRCHR = 0x240 + SYS_SVC99 = 0x241 + SYS___SVC99 = 0x241 + SYS_WCSWCS = 0x242 + SYS_LOCALECO = 0x243 + SYS_LOCALECONV = 0x243 + SYS___LIBREL = 0x244 + SYS_RELEASE = 0x245 + SYS___RLSE = 0x245 + SYS_FLOCATE = 0x246 + SYS___FLOCT = 0x246 + SYS_FDELREC = 0x247 + SYS___FDLREC = 0x247 + SYS_FETCH = 0x248 + SYS___FETCH = 0x248 + SYS_QSORT = 0x249 + SYS___CLEANUPCATCH = 0x260 + SYS___CATCHMATCH = 0x261 + SYS___CLEAN2UPCATCH = 0x262 + SYS_GETPRIORITY = 0x270 + SYS_NICE = 0x271 + SYS_SETPRIORITY = 0x272 + SYS_GETITIMER = 0x273 + SYS_SETITIMER = 0x274 + SYS_MSGCTL = 0x275 + SYS_MSGGET = 0x276 + SYS_MSGRCV = 0x277 + SYS_MSGSND = 0x278 + SYS_MSGXRCV = 0x279 + SYS___MSGXR = 0x279 + SYS_SHMGET = 0x280 + SYS___GETIPC = 0x281 + SYS_SETGRENT = 0x282 + SYS_GETGRENT = 0x283 + SYS_ENDGRENT = 0x284 + SYS_SETPWENT = 0x285 + SYS_GETPWENT = 0x286 + SYS_ENDPWENT = 0x287 + SYS_BSD_SIGNAL = 0x288 + SYS_KILLPG = 0x289 + SYS_SIGSET = 0x290 + SYS_SIGSTACK = 0x291 + SYS_GETRLIMIT = 0x292 + SYS_SETRLIMIT = 0x293 + SYS_GETRUSAGE = 0x294 + SYS_MMAP = 0x295 + SYS_MPROTECT = 0x296 + SYS_MSYNC = 0x297 + SYS_MUNMAP = 0x298 + SYS_CONFSTR = 0x299 + SYS___NDMTRM = 0x300 + SYS_FTOK = 0x301 + SYS_BASENAME = 0x302 + SYS_DIRNAME = 0x303 + SYS_GETDTABLESIZE = 0x304 + SYS_MKSTEMP = 0x305 + SYS_MKTEMP = 0x306 + SYS_NFTW = 0x307 + SYS_GETWD = 0x308 + SYS_LOCKF = 0x309 + SYS_WORDEXP = 0x310 + SYS_WORDFREE = 0x311 + SYS_GETPGID = 0x312 + SYS_GETSID = 0x313 + SYS___UTMPXNAME = 0x314 + SYS_CUSERID = 0x315 + SYS_GETPASS = 0x316 + SYS_FNMATCH = 0x317 + SYS_FTW = 0x318 + SYS_GETW = 0x319 + SYS_ACOSH = 0x320 + SYS_ASINH = 0x321 + SYS_ATANH = 0x322 + SYS_CBRT = 0x323 + SYS_EXPM1 = 0x324 + SYS_ILOGB = 0x325 + SYS_LOGB = 0x326 + SYS_LOG1P = 0x327 + SYS_NEXTAFTER = 0x328 + SYS_RINT = 0x329 + 
SYS_SPAWN = 0x330 + SYS_SPAWNP = 0x331 + SYS_GETLOGIN_UU = 0x332 + SYS_ECVT = 0x333 + SYS_FCVT = 0x334 + SYS_GCVT = 0x335 + SYS_ACCEPT = 0x336 + SYS_BIND = 0x337 + SYS_CONNECT = 0x338 + SYS_ENDHOSTENT = 0x339 + SYS_GETHOSTENT = 0x340 + SYS_GETHOSTID = 0x341 + SYS_GETHOSTNAME = 0x342 + SYS_GETNETBYADDR = 0x343 + SYS_GETNETBYNAME = 0x344 + SYS_GETNETENT = 0x345 + SYS_GETPEERNAME = 0x346 + SYS_GETPROTOBYNAME = 0x347 + SYS_GETPROTOBYNUMBER = 0x348 + SYS_GETPROTOENT = 0x349 + SYS_INET_LNAOF = 0x350 + SYS_INET_MAKEADDR = 0x351 + SYS_INET_NETOF = 0x352 + SYS_INET_NETWORK = 0x353 + SYS_INET_NTOA = 0x354 + SYS_IOCTL = 0x355 + SYS_LISTEN = 0x356 + SYS_READV = 0x357 + SYS_RECV = 0x358 + SYS_RECVFROM = 0x359 + SYS_SETHOSTENT = 0x360 + SYS_SETNETENT = 0x361 + SYS_SETPEER = 0x362 + SYS_SETPROTOENT = 0x363 + SYS_SETSERVENT = 0x364 + SYS_SETSOCKOPT = 0x365 + SYS_SHUTDOWN = 0x366 + SYS_SOCKET = 0x367 + SYS_SOCKETPAIR = 0x368 + SYS_WRITEV = 0x369 + SYS_ENDNETENT = 0x370 + SYS_CLOSELOG = 0x371 + SYS_OPENLOG = 0x372 + SYS_SETLOGMASK = 0x373 + SYS_SYSLOG = 0x374 + SYS_PTSNAME = 0x375 + SYS_SETREUID = 0x376 + SYS_SETREGID = 0x377 + SYS_REALPATH = 0x378 + SYS___SIGNGAM = 0x379 + SYS_POLL = 0x380 + SYS_REXEC = 0x381 + SYS___ISASCII2 = 0x382 + SYS___TOASCII2 = 0x383 + SYS_CHPRIORITY = 0x384 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 + SYS___STNETENT = 0x388 + SYS___STPROTOENT = 0x389 + SYS___SELECT1 = 0x390 + SYS_PTHREAD_SECURITY_NP = 0x391 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 + SYS___CONVERT_ID_NP = 0x393 + SYS___OPENVMREL = 0x394 + SYS_WMEMCHR = 0x395 + SYS_WMEMCMP = 0x396 + SYS_WMEMCPY = 0x397 + SYS_WMEMMOVE = 0x398 + SYS_WMEMSET = 0x399 + SYS___FPUTWC = 0x400 + SYS___PUTWC = 0x401 + SYS___PWCHAR = 0x402 + SYS___WCSFTM = 0x403 + SYS___WCSTOK = 0x404 + SYS___WCWDTH = 0x405 + SYS_T_ACCEPT = 0x409 + SYS_T_GETINFO = 0x410 + SYS_T_GETPROTADDR = 0x411 + SYS_T_GETSTATE = 0x412 + SYS_T_LISTEN = 0x413 + SYS_T_LOOK = 0x414 + SYS_T_OPEN = 0x415 + SYS_T_OPTMGMT = 0x416 + SYS_T_RCV = 0x417 + SYS_T_RCVCONNECT = 0x418 + SYS_T_RCVDIS = 0x419 + SYS_T_SNDUDATA = 0x420 + SYS_T_STRERROR = 0x421 + SYS_T_SYNC = 0x422 + SYS_T_UNBIND = 0x423 + SYS___T_ERRNO = 0x424 + SYS___RECVMSG2 = 0x425 + SYS___SENDMSG2 = 0x426 + SYS_FATTACH = 0x427 + SYS_FDETACH = 0x428 + SYS_GETMSG = 0x429 + SYS_GETCONTEXT = 0x430 + SYS_SETCONTEXT = 0x431 + SYS_MAKECONTEXT = 0x432 + SYS_SWAPCONTEXT = 0x433 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 + SYS_GETCLIENTID = 0x470 + SYS___GETCLIENTID = 0x471 + SYS_GETSTABLESIZE = 0x472 + SYS_GETIBMOPT = 0x473 + SYS_GETIBMSOCKOPT = 0x474 + SYS_GIVESOCKET = 0x475 + SYS_IBMSFLUSH = 0x476 + SYS_MAXDESC = 0x477 + SYS_SETIBMOPT = 0x478 + SYS_SETIBMSOCKOPT = 0x479 + SYS___SERVER_PWU = 0x480 + SYS_PTHREAD_TAG_NP = 0x481 + SYS___CONSOLE = 0x482 + SYS___WSINIT = 0x483 + SYS___IPTCPN = 0x489 + SYS___SERVER_CLASSIFY = 0x490 + SYS___HEAPRPT = 0x496 + SYS___ISBFP = 0x500 + SYS___FP_CAST = 0x501 + SYS___CERTIFICATE = 0x502 + SYS_SEND_FILE = 0x503 + SYS_AIO_CANCEL = 0x504 + SYS_AIO_ERROR = 0x505 + SYS_AIO_READ = 0x506 + SYS_AIO_RETURN = 0x507 + SYS_AIO_SUSPEND = 0x508 + SYS_AIO_WRITE = 0x509 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 + SYS___CTTBL = 0x517 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 
0x519 + SYS___FP_UNORDERED = 0x520 + SYS___FP_READ_RND = 0x521 + SYS___FP_READ_RND_B = 0x522 + SYS___FP_SWAP_RND = 0x523 + SYS___FP_SWAP_RND_B = 0x524 + SYS___FP_LEVEL = 0x525 + SYS___FP_BTOH = 0x526 + SYS___FP_HTOB = 0x527 + SYS___FPC_RD = 0x528 + SYS___FPC_WR = 0x529 + SYS_PTHREAD_SETCANCELTYPE = 0x600 + SYS_PTHREAD_TESTCANCEL = 0x601 + SYS___ATANF_B = 0x602 + SYS___ATANL_B = 0x603 + SYS___CEILF_B = 0x604 + SYS___CEILL_B = 0x605 + SYS___COSF_B = 0x606 + SYS___COSL_B = 0x607 + SYS___FABSF_B = 0x608 + SYS___FABSL_B = 0x609 + SYS___SINF_B = 0x610 + SYS___SINL_B = 0x611 + SYS___TANF_B = 0x612 + SYS___TANL_B = 0x613 + SYS___TANHF_B = 0x614 + SYS___TANHL_B = 0x615 + SYS___ACOSF_B = 0x616 + SYS___ACOSL_B = 0x617 + SYS___ASINF_B = 0x618 + SYS___ASINL_B = 0x619 + SYS___LOGF_B = 0x620 + SYS___LOGL_B = 0x621 + SYS___LOG10F_B = 0x622 + SYS___LOG10L_B = 0x623 + SYS___POWF_B = 0x624 + SYS___POWL_B = 0x625 + SYS___SINHF_B = 0x626 + SYS___SINHL_B = 0x627 + SYS___SQRTF_B = 0x628 + SYS___SQRTL_B = 0x629 + SYS___MODFL_B = 0x630 + SYS_ABSF = 0x631 + SYS_ABSL = 0x632 + SYS_ACOSF = 0x633 + SYS_ACOSL = 0x634 + SYS_ASINF = 0x635 + SYS_ASINL = 0x636 + SYS_ATAN2F = 0x637 + SYS_ATAN2L = 0x638 + SYS_ATANF = 0x639 + SYS_COSHL = 0x640 + SYS_EXPF = 0x641 + SYS_EXPL = 0x642 + SYS_TANHF = 0x643 + SYS_TANHL = 0x644 + SYS_LOG10F = 0x645 + SYS_LOG10L = 0x646 + SYS_LOGF = 0x647 + SYS_LOGL = 0x648 + SYS_POWF = 0x649 + SYS_SINHL = 0x650 + SYS_TANF = 0x651 + SYS_TANL = 0x652 + SYS_FABSF = 0x653 + SYS_FABSL = 0x654 + SYS_FLOORF = 0x655 + SYS_FLOORL = 0x656 + SYS_FMODF = 0x657 + SYS_FMODL = 0x658 + SYS_FREXPF = 0x659 + SYS___CHATTR = 0x660 + SYS___FCHATTR = 0x661 + SYS___TOCCSID = 0x662 + SYS___CSNAMETYPE = 0x663 + SYS___TOCSNAME = 0x664 + SYS___CCSIDTYPE = 0x665 + SYS___AE_CORRESTBL_QUERY = 0x666 + SYS___AE_AUTOCONVERT_STATE = 0x667 + SYS_DN_FIND = 0x668 + SYS___GETHOSTBYADDR_A = 0x669 + SYS___MBLEN_SB_A = 0x670 + SYS___MBLEN_STD_A = 0x671 + SYS___MBLEN_UTF = 0x672 + SYS___MBSTOWCS_A = 0x673 + SYS___MBSTOWCS_STD_A = 0x674 + SYS___MBTOWC_A = 0x675 + SYS___MBTOWC_ISO1 = 0x676 + SYS___MBTOWC_SBCS = 0x677 + SYS___MBTOWC_MBCS = 0x678 + SYS___MBTOWC_UTF = 0x679 + SYS___CSID_A = 0x680 + SYS___CSID_STD_A = 0x681 + SYS___WCSID_A = 0x682 + SYS___WCSID_STD_A = 0x683 + SYS___WCTOMB_A = 0x684 + SYS___WCTOMB_ISO1 = 0x685 + SYS___WCTOMB_STD_A = 0x686 + SYS___WCTOMB_UTF = 0x687 + SYS___WCWIDTH_A = 0x688 + SYS___GETGRNAM_R_A = 0x689 + SYS___READDIR_R_A = 0x690 + SYS___E2A_S = 0x691 + SYS___FNMATCH_A = 0x692 + SYS___FNMATCH_C_A = 0x693 + SYS___EXECL_A = 0x694 + SYS___FNMATCH_STD_A = 0x695 + SYS___REGCOMP_A = 0x696 + SYS___REGCOMP_STD_A = 0x697 + SYS___REGERROR_A = 0x698 + SYS___REGERROR_STD_A = 0x699 + SYS___SWPRINTF_A = 0x700 + SYS___FSCANF_A = 0x701 + SYS___SCANF_A = 0x702 + SYS___SSCANF_A = 0x703 + SYS___SWSCANF_A = 0x704 + SYS___ATOF_A = 0x705 + SYS___ATOI_A = 0x706 + SYS___ATOL_A = 0x707 + SYS___STRTOD_A = 0x708 + SYS___STRTOL_A = 0x709 + SYS___L64A_A = 0x710 + SYS___STRERROR_A = 0x711 + SYS___PERROR_A = 0x712 + SYS___FETCH_A = 0x713 + SYS___GETENV_A = 0x714 + SYS___MKSTEMP_A = 0x717 + SYS___PTSNAME_A = 0x718 + SYS___PUTENV_A = 0x719 + SYS___CHDIR_A = 0x720 + SYS___CHOWN_A = 0x721 + SYS___CHROOT_A = 0x722 + SYS___GETCWD_A = 0x723 + SYS___GETWD_A = 0x724 + SYS___LCHOWN_A = 0x725 + SYS___LINK_A = 0x726 + SYS___PATHCONF_A = 0x727 + SYS___IF_NAMEINDEX_A = 0x728 + SYS___READLINK_A = 0x729 + SYS___EXTLINK_NP_A = 0x730 + SYS___ISALNUM_A = 0x731 + SYS___ISALPHA_A = 0x732 + SYS___A2E_S = 0x733 + SYS___ISCNTRL_A = 0x734 + SYS___ISDIGIT_A = 0x735 + 
SYS___ISGRAPH_A = 0x736 + SYS___ISLOWER_A = 0x737 + SYS___ISPRINT_A = 0x738 + SYS___ISPUNCT_A = 0x739 + SYS___ISWALPHA_A = 0x740 + SYS___A2E_L = 0x741 + SYS___ISWCNTRL_A = 0x742 + SYS___ISWDIGIT_A = 0x743 + SYS___ISWGRAPH_A = 0x744 + SYS___ISWLOWER_A = 0x745 + SYS___ISWPRINT_A = 0x746 + SYS___ISWPUNCT_A = 0x747 + SYS___ISWSPACE_A = 0x748 + SYS___ISWUPPER_A = 0x749 + SYS___REMOVE_A = 0x750 + SYS___RENAME_A = 0x751 + SYS___TMPNAM_A = 0x752 + SYS___FOPEN_A = 0x753 + SYS___FREOPEN_A = 0x754 + SYS___CUSERID_A = 0x755 + SYS___POPEN_A = 0x756 + SYS___TEMPNAM_A = 0x757 + SYS___FTW_A = 0x758 + SYS___GETGRENT_A = 0x759 + SYS___INET_NTOP_A = 0x760 + SYS___GETPASS_A = 0x761 + SYS___GETPWENT_A = 0x762 + SYS___GETPWNAM_A = 0x763 + SYS___GETPWUID_A = 0x764 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 + SYS___CHECKSCHENV_A = 0x766 + SYS___CONNECTSERVER_A = 0x767 + SYS___CONNECTWORKMGR_A = 0x768 + SYS_____CONSOLE_A = 0x769 + SYS___MSGSND_A = 0x770 + SYS___MSGXRCV_A = 0x771 + SYS___NFTW_A = 0x772 + SYS_____PASSWD_A = 0x773 + SYS___PTHREAD_SECURITY_NP_A = 0x774 + SYS___QUERYMETRICS_A = 0x775 + SYS___QUERYSCHENV = 0x776 + SYS___READV_A = 0x777 + SYS_____SERVER_CLASSIFY_A = 0x778 + SYS_____SERVER_INIT_A = 0x779 + SYS___W_GETPSENT_A = 0x780 + SYS___WRITEV_A = 0x781 + SYS___W_STATFS_A = 0x782 + SYS___W_STATVFS_A = 0x783 + SYS___FPUTC_A = 0x784 + SYS___PUTCHAR_A = 0x785 + SYS___PUTS_A = 0x786 + SYS___FGETS_A = 0x787 + SYS___GETS_A = 0x788 + SYS___FPUTS_A = 0x789 + SYS___PUTC_A = 0x790 + SYS___AE_THREAD_SETMODE = 0x791 + SYS___AE_THREAD_SWAPMODE = 0x792 + SYS___GETNETBYADDR_A = 0x793 + SYS___GETNETBYNAME_A = 0x794 + SYS___GETNETENT_A = 0x795 + SYS___GETPROTOBYNAME_A = 0x796 + SYS___GETPROTOBYNUMBER_A = 0x797 + SYS___GETPROTOENT_A = 0x798 + SYS___GETSERVBYNAME_A = 0x799 + SYS_ACL_FIRST_ENTRY = 0x800 + SYS_ACL_GET_ENTRY = 0x801 + SYS_ACL_VALID = 0x802 + SYS_ACL_CREATE_ENTRY = 0x803 + SYS_ACL_DELETE_ENTRY = 0x804 + SYS_ACL_UPDATE_ENTRY = 0x805 + SYS_ACL_DELETE_FD = 0x806 + SYS_ACL_DELETE_FILE = 0x807 + SYS_ACL_GET_FD = 0x808 + SYS_ACL_GET_FILE = 0x809 + SYS___ERFL_B = 0x810 + SYS___ERFCL_B = 0x811 + SYS___LGAMMAL_B = 0x812 + SYS___SETHOOKEVENTS = 0x813 + SYS_IF_NAMETOINDEX = 0x814 + SYS_IF_INDEXTONAME = 0x815 + SYS_IF_NAMEINDEX = 0x816 + SYS_IF_FREENAMEINDEX = 0x817 + SYS_GETADDRINFO = 0x818 + SYS_GETNAMEINFO = 0x819 + SYS___DYNFREE_A = 0x820 + SYS___RES_QUERY_A = 0x821 + SYS___RES_SEARCH_A = 0x822 + SYS___RES_QUERYDOMAIN_A = 0x823 + SYS___RES_MKQUERY_A = 0x824 + SYS___RES_SEND_A = 0x825 + SYS___DN_EXPAND_A = 0x826 + SYS___DN_SKIPNAME_A = 0x827 + SYS___DN_COMP_A = 0x828 + SYS___DN_FIND_A = 0x829 + SYS___INET_NTOA_A = 0x830 + SYS___INET_NETWORK_A = 0x831 + SYS___ACCEPT_A = 0x832 + SYS___ACCEPT_AND_RECV_A = 0x833 + SYS___BIND_A = 0x834 + SYS___CONNECT_A = 0x835 + SYS___GETPEERNAME_A = 0x836 + SYS___GETSOCKNAME_A = 0x837 + SYS___RECVFROM_A = 0x838 + SYS___SENDTO_A = 0x839 + SYS___LCHATTR = 0x840 + SYS___WRITEDOWN = 0x841 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 + SYS___ACOSHF_B = 0x843 + SYS___ACOSHL_B = 0x844 + SYS___ASINHF_B = 0x845 + SYS___ASINHL_B = 0x846 + SYS___ATANHF_B = 0x847 + SYS___ATANHL_B = 0x848 + SYS___CBRTF_B = 0x849 + SYS___EXP2F_B = 0x850 + SYS___EXP2L_B = 0x851 + SYS___EXPM1F_B = 0x852 + SYS___EXPM1L_B = 0x853 + SYS___FDIMF_B = 0x854 + SYS___FDIM_B = 0x855 + SYS___FDIML_B = 0x856 + SYS___HYPOTF_B = 0x857 + SYS___HYPOTL_B = 0x858 + SYS___LOG1PF_B = 0x859 + SYS___REMQUOF_B = 0x860 + SYS___REMQUO_B = 0x861 + SYS___REMQUOL_B = 0x862 + SYS___TGAMMAF_B = 0x863 + SYS___TGAMMA_B = 0x864 + SYS___TGAMMAL_B = 0x865 + 
SYS___TRUNCF_B = 0x866 + SYS___TRUNC_B = 0x867 + SYS___TRUNCL_B = 0x868 + SYS___LGAMMAF_B = 0x869 + SYS_ASINHF = 0x870 + SYS_ASINHL = 0x871 + SYS_ATANHF = 0x872 + SYS_ATANHL = 0x873 + SYS_CBRTF = 0x874 + SYS_CBRTL = 0x875 + SYS_COPYSIGNF = 0x876 + SYS_CPYSIGNF = 0x876 + SYS_COPYSIGNL = 0x877 + SYS_CPYSIGNL = 0x877 + SYS_COTANF = 0x878 + SYS___COTANF = 0x878 + SYS_COTAN = 0x879 + SYS___COTAN = 0x879 + SYS_FDIM = 0x881 + SYS_FDIML = 0x882 + SYS_HYPOTF = 0x883 + SYS_HYPOTL = 0x884 + SYS_LOG1PF = 0x885 + SYS_LOG1PL = 0x886 + SYS_LOG2F = 0x887 + SYS_LOG2 = 0x888 + SYS_LOG2L = 0x889 + SYS_TGAMMA = 0x890 + SYS_TGAMMAL = 0x891 + SYS_TRUNCF = 0x892 + SYS_TRUNC = 0x893 + SYS_TRUNCL = 0x894 + SYS_LGAMMAF = 0x895 + SYS_LGAMMAL = 0x896 + SYS_LROUNDF = 0x897 + SYS_LROUND = 0x898 + SYS_ERFF = 0x899 + SYS___COSHF_H = 0x900 + SYS___COSHL_H = 0x901 + SYS___COTAN_H = 0x902 + SYS___COTANF_H = 0x903 + SYS___COTANL_H = 0x904 + SYS___ERF_H = 0x905 + SYS___ERFF_H = 0x906 + SYS___ERFL_H = 0x907 + SYS___ERFC_H = 0x908 + SYS___ERFCF_H = 0x909 + SYS___FDIMF_H = 0x910 + SYS___FDIML_H = 0x911 + SYS___FMOD_H = 0x912 + SYS___FMODF_H = 0x913 + SYS___FMODL_H = 0x914 + SYS___GAMMA_H = 0x915 + SYS___HYPOT_H = 0x916 + SYS___ILOGB_H = 0x917 + SYS___LGAMMA_H = 0x918 + SYS___LGAMMAF_H = 0x919 + SYS___LOG2L_H = 0x920 + SYS___LOG1P_H = 0x921 + SYS___LOG10_H = 0x922 + SYS___LOG10F_H = 0x923 + SYS___LOG10L_H = 0x924 + SYS___LROUND_H = 0x925 + SYS___LROUNDF_H = 0x926 + SYS___NEXTAFTER_H = 0x927 + SYS___POW_H = 0x928 + SYS___POWF_H = 0x929 + SYS___SINL_H = 0x930 + SYS___SINH_H = 0x931 + SYS___SINHF_H = 0x932 + SYS___SINHL_H = 0x933 + SYS___SQRT_H = 0x934 + SYS___SQRTF_H = 0x935 + SYS___SQRTL_H = 0x936 + SYS___TAN_H = 0x937 + SYS___TANF_H = 0x938 + SYS___TANL_H = 0x939 + SYS___TRUNCF_H = 0x940 + SYS___TRUNCL_H = 0x941 + SYS___COSH_H = 0x942 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 + SYS_VFSCANF = 0x944 + SYS_VSCANF = 0x946 + SYS_VSSCANF = 0x948 + SYS_IMAXABS = 0x950 + SYS_IMAXDIV = 0x951 + SYS_STRTOIMAX = 0x952 + SYS_STRTOUMAX = 0x953 + SYS_WCSTOIMAX = 0x954 + SYS_WCSTOUMAX = 0x955 + SYS_ATOLL = 0x956 + SYS_STRTOF = 0x957 + SYS_STRTOLD = 0x958 + SYS_WCSTOF = 0x959 + SYS_INET6_RTH_GETADDR = 0x960 + SYS_INET6_OPT_INIT = 0x961 + SYS_INET6_OPT_APPEND = 0x962 + SYS_INET6_OPT_FINISH = 0x963 + SYS_INET6_OPT_SET_VAL = 0x964 + SYS_INET6_OPT_NEXT = 0x965 + SYS_INET6_OPT_FIND = 0x966 + SYS_INET6_OPT_GET_VAL = 0x967 + SYS___POW_I = 0x987 + SYS___POW_I_B = 0x988 + SYS___POW_I_H = 0x989 + SYS___CABS_H = 0x990 + SYS_CABSF = 0x991 + SYS___CABSF_B = 0x992 + SYS___CABSF_H = 0x993 + SYS_CABSL = 0x994 + SYS___CABSL_B = 0x995 + SYS___CABSL_H = 0x996 + SYS_CACOS = 0x997 + SYS___CACOS_B = 0x998 + SYS___CACOS_H = 0x999 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 295859c503..7a8161c1d1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -1,6 +1,7 @@ // cgo -godefs types_aix.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc && aix // +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index a9ee0ffd44..07ed733c51 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -1,6 +1,7 @@ // cgo -godefs types_aix.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build ppc64 && aix // +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go deleted file mode 100644 index 725b4bee27..0000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ /dev/null @@ -1,516 +0,0 @@ -// cgo -godefs types_darwin.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build 386,darwin - -package unix - -const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 -) - -type Timespec struct { - Sec int32 - Nsec int32 -} - -type Timeval struct { - Sec int32 - Usec int32 -} - -type Timeval32 struct{} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int32 - Ixrss int32 - Idrss int32 - Isrss int32 - Minflt int32 - Majflt int32 - Nswap int32 - Inblock int32 - Oublock int32 - Msgsnd int32 - Msgrcv int32 - Nsignals int32 - Nvcsw int32 - Nivcsw int32 -} - -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - -type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 -} - -type Statfs_t struct { - Bsize uint32 - Iosize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Owner uint32 - Type uint32 - Flags uint32 - Fssubtype uint32 - Fstypename [16]byte - Mntonname [1024]byte - Mntfromname [1024]byte - Reserved [8]uint32 -} - -type Flock_t struct { - Start int64 - Len int64 - Pid int32 - Type int16 - Whence int16 -} - -type Fstore_t struct { - Flags uint32 - Posmode int32 - Offset int64 - Length int64 - Bytesalloc int64 -} - -type Radvisory_t struct { - Offset int64 - Count int32 -} - -type Fbootstraptransfer_t struct { - Offset int64 - Length uint32 - Buffer *byte -} - -type Log2phys_t struct { - Flags uint32 - Contigbytes int64 - Devoffset int64 -} - -type Fsid struct { - Val [2]int32 -} - -type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - _ [3]byte -} - -const ( - PathMax = 0x400 -) - -type RawSockaddrInet4 struct { - Len uint8 - Family uint8 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]int8 -} - -type RawSockaddrInet6 struct { - Len uint8 - Family uint8 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Len uint8 - Family uint8 - Path [104]int8 -} - -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [12]int8 -} - -type RawSockaddr struct { - Len uint8 - Family uint8 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [92]int8 -} - -type RawSockaddrCtl struct { - Sc_len uint8 - Sc_family uint8 - Ss_sysaddr uint16 - Sc_id uint32 - Sc_unit uint32 - Sc_reserved [5]uint32 -} - -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - -type Iovec struct { - Base *byte - Len uint32 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type Msghdr struct { - Name *byte - Namelen uint32 - Iov *Iovec - Iovlen int32 
- Control *byte - Controllen uint32 - Flags int32 -} - -type Cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -type Inet4Pktinfo struct { - Ifindex uint32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Filt [8]uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 -) - -const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 -) - -type Kevent_t struct { - Ident uint32 - Filter int16 - Flags uint16 - Fflags uint32 - Data int32 - Udata *byte -} - -type FdSet struct { - Bits [32]int32 -} - -const ( - SizeofIfMsghdr = 0x70 - SizeofIfData = 0x60 - SizeofIfaMsghdr = 0x14 - SizeofIfmaMsghdr = 0x10 - SizeofIfmaMsghdr2 = 0x14 - SizeofRtMsghdr = 0x5c - SizeofRtMetrics = 0x38 -) - -type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Data IfData -} - -type IfData struct { - Type uint8 - Typelen uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Recvquota uint8 - Xmitquota uint8 - Unused1 uint8 - Mtu uint32 - Metric uint32 - Baudrate uint32 - Ipackets uint32 - Ierrors uint32 - Opackets uint32 - Oerrors uint32 - Collisions uint32 - Ibytes uint32 - Obytes uint32 - Imcasts uint32 - Omcasts uint32 - Iqdrops uint32 - Noproto uint32 - Recvtiming uint32 - Xmittiming uint32 - Lastchange Timeval - Unused2 uint32 - Hwassist uint32 - Reserved1 uint32 - Reserved2 uint32 -} - -type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Metric int32 -} - -type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte -} - -type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Refcount int32 -} - -type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics -} - -type RtMetrics struct { - Locks uint32 - Mtu uint32 - Hopcount uint32 - Expire int32 - Recvpipe uint32 - Sendpipe uint32 - Ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Pksent uint32 - State uint32 - Filler [3]uint32 -} - -const ( - SizeofBpfVersion = 0x4 - SizeofBpfStat = 0x8 - SizeofBpfProgram = 0x8 - SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 -) - -type BpfVersion struct { - Major uint16 - Minor uint16 -} - -type BpfStat struct { - Recv uint32 - Drop uint32 -} - -type BpfProgram struct { - Len uint32 - Insns *BpfInsn -} - -type BpfInsn struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - _ [2]byte -} - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - 
AT_SYMLINK_NOFOLLOW = 0x20 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLERR = 0x8 - POLLHUP = 0x10 - POLLIN = 0x1 - POLLNVAL = 0x20 - POLLOUT = 0x4 - POLLPRI = 0x2 - POLLRDBAND = 0x80 - POLLRDNORM = 0x40 - POLLWRBAND = 0x100 - POLLWRNORM = 0x4 -) - -type Utsname struct { - Sysname [256]byte - Nodename [256]byte - Release [256]byte - Version [256]byte - Machine [256]byte -} - -const SizeofClockinfo = 0x14 - -type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 -} - -type CtlInfo struct { - Id uint32 - Name [96]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 080ffce325..2673e6c590 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && darwin // +build amd64,darwin package unix @@ -210,6 +211,13 @@ type RawSockaddrCtl struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} + type Linger struct { Onoff int32 Linger int32 @@ -225,6 +233,12 @@ type IPMreq struct { Interface [4]byte /* in_addr */ } +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + type IPv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 @@ -273,9 +287,11 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 + SizeofXucred = 0x4c SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 SizeofCmsghdr = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go deleted file mode 100644 index f2a77bc4e2..0000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ /dev/null @@ -1,516 +0,0 @@ -// cgo -godefs types_darwin.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build arm,darwin - -package unix - -const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 -) - -type Timespec struct { - Sec int32 - Nsec int32 -} - -type Timeval struct { - Sec int32 - Usec int32 -} - -type Timeval32 struct{} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int32 - Ixrss int32 - Idrss int32 - Isrss int32 - Minflt int32 - Majflt int32 - Nswap int32 - Inblock int32 - Oublock int32 - Msgsnd int32 - Msgrcv int32 - Nsignals int32 - Nvcsw int32 - Nivcsw int32 -} - -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - -type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 -} - -type Statfs_t struct { - Bsize uint32 - Iosize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Owner uint32 - Type uint32 - Flags uint32 - Fssubtype uint32 - Fstypename [16]byte - Mntonname [1024]byte - Mntfromname [1024]byte - Reserved [8]uint32 -} - -type Flock_t struct { - Start int64 - Len int64 - Pid int32 - Type int16 - Whence int16 -} - -type Fstore_t struct { - Flags uint32 - Posmode int32 - Offset int64 - Length int64 - Bytesalloc int64 -} - -type Radvisory_t struct { - Offset int64 - Count int32 -} - -type Fbootstraptransfer_t struct { - Offset int64 - Length uint32 - Buffer *byte -} - -type Log2phys_t struct { - Flags uint32 - Contigbytes int64 - Devoffset int64 -} - -type Fsid struct { - Val [2]int32 -} - -type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - _ [3]byte -} - -const ( - PathMax = 0x400 -) - -type RawSockaddrInet4 struct { - Len uint8 - Family uint8 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]int8 -} - -type RawSockaddrInet6 struct { - Len uint8 - Family uint8 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Len uint8 - Family uint8 - Path [104]int8 -} - -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [12]int8 -} - -type RawSockaddr struct { - Len uint8 - Family uint8 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [92]int8 -} - -type RawSockaddrCtl struct { - Sc_len uint8 - Sc_family uint8 - Ss_sysaddr uint16 - Sc_id uint32 - Sc_unit uint32 - Sc_reserved [5]uint32 -} - -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - -type Iovec struct { - Base *byte - Len uint32 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type Msghdr struct { - Name *byte - Namelen uint32 - Iov *Iovec - Iovlen int32 - Control *byte - Controllen uint32 - Flags int32 -} - -type Cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -type Inet4Pktinfo struct { - Ifindex uint32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Filt [8]uint32 -} - 
-const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 -) - -const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 -) - -type Kevent_t struct { - Ident uint32 - Filter int16 - Flags uint16 - Fflags uint32 - Data int32 - Udata *byte -} - -type FdSet struct { - Bits [32]int32 -} - -const ( - SizeofIfMsghdr = 0x70 - SizeofIfData = 0x60 - SizeofIfaMsghdr = 0x14 - SizeofIfmaMsghdr = 0x10 - SizeofIfmaMsghdr2 = 0x14 - SizeofRtMsghdr = 0x5c - SizeofRtMetrics = 0x38 -) - -type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Data IfData -} - -type IfData struct { - Type uint8 - Typelen uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Recvquota uint8 - Xmitquota uint8 - Unused1 uint8 - Mtu uint32 - Metric uint32 - Baudrate uint32 - Ipackets uint32 - Ierrors uint32 - Opackets uint32 - Oerrors uint32 - Collisions uint32 - Ibytes uint32 - Obytes uint32 - Imcasts uint32 - Omcasts uint32 - Iqdrops uint32 - Noproto uint32 - Recvtiming uint32 - Xmittiming uint32 - Lastchange Timeval - Unused2 uint32 - Hwassist uint32 - Reserved1 uint32 - Reserved2 uint32 -} - -type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Metric int32 -} - -type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte -} - -type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Refcount int32 -} - -type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics -} - -type RtMetrics struct { - Locks uint32 - Mtu uint32 - Hopcount uint32 - Expire int32 - Recvpipe uint32 - Sendpipe uint32 - Ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Pksent uint32 - State uint32 - Filler [3]uint32 -} - -const ( - SizeofBpfVersion = 0x4 - SizeofBpfStat = 0x8 - SizeofBpfProgram = 0x8 - SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 -) - -type BpfVersion struct { - Major uint16 - Minor uint16 -} - -type BpfStat struct { - Recv uint32 - Drop uint32 -} - -type BpfProgram struct { - Len uint32 - Insns *BpfInsn -} - -type BpfInsn struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - _ [2]byte -} - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - AT_SYMLINK_NOFOLLOW = 0x20 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLERR = 0x8 - POLLHUP = 0x10 - POLLIN = 0x1 - POLLNVAL = 0x20 - POLLOUT = 0x4 - POLLPRI = 0x2 - POLLRDBAND = 0x80 - POLLRDNORM = 0x40 - POLLWRBAND = 0x100 - POLLWRNORM = 0x4 -) - -type Utsname struct { - Sysname [256]byte - Nodename [256]byte - Release [256]byte - Version [256]byte - Machine [256]byte -} - -const 
SizeofClockinfo = 0x14 - -type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 -} - -type CtlInfo struct { - Id uint32 - Name [96]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index c9492428bf..1465cbcffe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin package unix @@ -210,6 +211,13 @@ type RawSockaddrCtl struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} + type Linger struct { Onoff int32 Linger int32 @@ -225,6 +233,12 @@ type IPMreq struct { Interface [4]byte /* in_addr */ } +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + type IPv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 @@ -273,9 +287,11 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 + SizeofXucred = 0x4c SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 SizeofCmsghdr = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 85506a05d4..1d049d7a12 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_dragonfly.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 3e9dad33e3..c51bc88ffd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd package unix @@ -250,6 +251,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -312,6 +321,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x50 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index e00e615544..395b691871 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && freebsd // +build amd64,freebsd package unix @@ -246,6 +247,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -308,6 +317,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 5da13c871b..d3f9d2541b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd package unix @@ -248,6 +249,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -310,6 +319,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x50 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 995ecf9d4e..434d6e8e83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && freebsd // +build arm64,freebsd package unix @@ -246,6 +247,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -308,6 +317,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go new file mode 100644 index 0000000000..236f37ef6f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go @@ -0,0 +1,40 @@ +// cgo -godefs types_illumos.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build amd64 && illumos +// +build amd64,illumos + +package unix + +const ( + TUNNEWPPA = 0x540001 + TUNSETPPA = 0x540002 + + I_STR = 0x5308 + I_POP = 0x5303 + I_PUSH = 0x5302 + I_PLINK = 0x5316 + I_PUNLINK = 0x5317 + + IF_UNITSEL = -0x7ffb8cca +) + +type strbuf struct { + Maxlen int32 + Len int32 + Buf *int8 +} + +type Strioctl struct { + Cmd int32 + Timout int32 + Len int32 + Dp *int8 +} + +type Lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 9f3b1a4e56..c9b2c9aae0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -83,7 +84,7 @@ type FileCloneRange struct { Dest_offset uint64 } -type FileDedupeRange struct { +type RawFileDedupeRange struct { Src_offset uint64 Src_length uint64 Dest_count uint16 @@ -91,6 +92,21 @@ type FileDedupeRange struct { Reserved2 uint32 } +type RawFileDedupeRangeInfo struct { + Dest_fd int64 + Dest_offset uint64 + Bytes_deduped uint64 + Status int32 + Reserved uint32 +} + +const ( + SizeofRawFileDedupeRange = 0x18 + SizeofRawFileDedupeRangeInfo = 0x20 + FILE_DEDUPE_RANGE_SAME = 0x0 + FILE_DEDUPE_RANGE_DIFFERS = 0x1 +) + type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 @@ -288,7 +304,8 @@ type RawSockaddrVM struct { Reserved1 uint16 Port uint32 Cid uint32 - Zero [4]uint8 + Flags uint8 + Zero [3]uint8 } type RawSockaddrXDP struct { @@ -999,7 +1016,10 @@ const ( PERF_SAMPLE_PHYS_ADDR = 0x80000 PERF_SAMPLE_AUX = 0x100000 PERF_SAMPLE_CGROUP = 0x200000 - PERF_SAMPLE_MAX = 0x400000 + PERF_SAMPLE_DATA_PAGE_SIZE = 0x400000 + PERF_SAMPLE_CODE_PAGE_SIZE = 0x800000 + PERF_SAMPLE_WEIGHT_STRUCT = 0x1000000 + PERF_SAMPLE_MAX = 0x2000000 PERF_SAMPLE_BRANCH_USER_SHIFT = 0x0 PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 0x1 PERF_SAMPLE_BRANCH_HV_SHIFT = 0x2 @@ -3109,7 +3129,8 @@ const ( DEVLINK_ATTR_REMOTE_RELOAD_STATS = 0xa1 DEVLINK_ATTR_RELOAD_ACTION_INFO = 0xa2 DEVLINK_ATTR_RELOAD_ACTION_STATS = 0xa3 - DEVLINK_ATTR_MAX = 0xa3 + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 0xa4 + DEVLINK_ATTR_MAX = 0xa4 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3123,7 +3144,9 @@ const ( DEVLINK_RESOURCE_UNIT_ENTRY = 0x0 DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0x0 DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x1 + DEVLINK_PORT_FN_ATTR_STATE = 0x2 + DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 ) type FsverityDigest struct { @@ -3492,7 +3515,8 @@ const ( ETHTOOL_A_LINKMODES_DUPLEX = 0x6 ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7 ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8 - ETHTOOL_A_LINKMODES_MAX = 0x8 + ETHTOOL_A_LINKMODES_LANES = 0x9 + ETHTOOL_A_LINKMODES_MAX = 0x9 ETHTOOL_A_LINKSTATE_UNSPEC = 0x0 ETHTOOL_A_LINKSTATE_HEADER = 0x1 ETHTOOL_A_LINKSTATE_LINK = 0x2 @@ -3680,3 +3704,127 @@ const ( ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 0x2 ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) + +type EthtoolDrvinfo struct { + Cmd uint32 + Driver [32]byte + Version [32]byte + Fw_version [32]byte + Bus_info [32]byte + Erom_version [32]byte + Reserved2 [12]byte + N_priv_flags uint32 + N_stats uint32 + Testinfo_len uint32 + Eedump_len uint32 + Regdump_len uint32 +} + +type ( + HIDRawReportDescriptor struct { + Size uint32 + Value [4096]uint8 + } + HIDRawDevInfo struct { + Bustype uint32 + Vendor int16 + Product int16 + } +) + +const ( + CLOSE_RANGE_UNSHARE = 0x2 + CLOSE_RANGE_CLOEXEC = 0x4 +) + +const ( + NLMSGERR_ATTR_MSG = 0x1 + NLMSGERR_ATTR_OFFS = 0x2 + NLMSGERR_ATTR_COOKIE = 0x3 +) + +type ( + EraseInfo struct { + Start uint32 + Length uint32 + } + EraseInfo64 struct { + Start uint64 + Length uint64 + } + MtdOobBuf struct { + Start uint32 + Length uint32 + Ptr *uint8 + } + MtdOobBuf64 struct { + Start uint64 + Pad uint32 + Length uint32 + Ptr uint64 + } + MtdWriteReq struct { + Start uint64 + Len uint64 + Ooblen uint64 + Data uint64 + Oob uint64 + Mode uint8 + _ [7]uint8 + } + MtdInfo struct { + Type uint8 + Flags uint32 + Size uint32 + Erasesize uint32 + Writesize uint32 + Oobsize uint32 + _ uint64 + } + RegionInfo struct { + Offset 
uint32 + Erasesize uint32 + Numblocks uint32 + Regionindex uint32 + } + OtpInfo struct { + Start uint32 + Length uint32 + Locked uint32 + } + NandOobinfo struct { + Useecc uint32 + Eccbytes uint32 + Oobfree [8][2]uint32 + Eccpos [32]uint32 + } + NandOobfree struct { + Offset uint32 + Length uint32 + } + NandEcclayout struct { + Eccbytes uint32 + Eccpos [64]uint32 + Oobavail uint32 + Oobfree [8]NandOobfree + } + MtdEccStats struct { + Corrected uint32 + Failed uint32 + Badblocks uint32 + Bbtblocks uint32 + } +) + +const ( + MTD_OPS_PLACE_OOB = 0x0 + MTD_OPS_AUTO_OOB = 0x1 + MTD_OPS_RAW = 0x2 +) + +const ( + MTD_FILE_MODE_NORMAL = 0x0 + MTD_FILE_MODE_OTP_FACTORY = 0x1 + MTD_FILE_MODE_OTP_USER = 0x2 + MTD_FILE_MODE_RAW = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 088bd77e3b..4d4d283de5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 078d958ec9..8a2eed5ec4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 2d39122f41..94b34add64 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 304cbd0453..2143de4d59 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 7d9d57006a..a40216eee6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips && linux // +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index a1eb2577b0..e834b069fd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 2e5ce3b6a6..e31083b048 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index bbaa1200b6..42811f7fb5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go new file mode 100644 index 0000000000..af7a72017e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -0,0 +1,627 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build ppc && linux +// +build ppc,linux + +package unix + +const ( + SizeofPtr = 0x4 + SizeofLong = 0x4 +) + +type ( + _C_long int32 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + _ [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint16 + _ [4]byte + Size int64 + Blksize int32 + _ [4]byte + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ uint32 + _ uint32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +const ( + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc +) + +const ( + SizeofSockFprog = 0x8 +) + +type PtraceRegs struct { + Gpr [32]uint32 + Nip uint32 + Msr uint32 + Orig_gpr3 uint32 + Ctr uint32 + Link uint32 + Xer uint32 + Ccr uint32 + Mq uint32 + Trap uint32 + Dar uint32 + Dsisr uint32 + Result uint32 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + _ [8]uint8 +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]uint8 + Fpack [6]uint8 +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [32]uint32 +} + +const _C__NSIG = 0x41 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Line uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [4]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + 
Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 +} + +type cpuMask uint32 + +const ( + _NCPUBITS = 0x20 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]uint8 + _ uint32 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 + _ [4]byte +} + +type TpacketHdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 +} + +const ( + SizeofTpacketHdr = 0x18 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult 
int32 + Clock int32 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]uint8 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400470a1 + PPS_SETPARAMS = 0x800470a2 + PPS_GETCAP = 0x400470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 0e6e8a7748..2a3afbaef9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include 
/build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 7382f385fa..c0de30a658 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64le && linux // +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 28d5522166..74faf2e91f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index a91a7a44bd..9a8f0c2c6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index f824b2358d..72cdda75bd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 3f11f88e3c..b10e73abf9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 0bed83af57..28ed6d55ae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index e4e3bf736d..4ba196ebe5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index efac861bb7..dd642bd9c8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 80fa295f1d..1fdb0e5fa5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 560dd6d08a..e2fc93c7c0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 0c1700fa43..8d34b5a2fc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 5b3e46633e..ea8f1a0d9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 62bff16709..ec6e8bc3f1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ca512aff7f..85effef9c1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_solaris.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && solaris // +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go new file mode 100644 index 0000000000..4ab638cb94 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -0,0 +1,406 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Hand edited based on ztypes_linux_s390x.go +// TODO: auto-generate. + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 + PathMax = 0x1000 +) + +const ( + SizeofSockaddrAny = 128 + SizeofCmsghdr = 12 + SizeofIPMreq = 8 + SizeofIPv6Mreq = 20 + SizeofICMPv6Filter = 32 + SizeofIPv6MTUInfo = 32 + SizeofLinger = 8 + SizeofSockaddrInet4 = 16 + SizeofSockaddrInet6 = 28 + SizeofTCPInfo = 0x68 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type timeval_zos struct { //correct (with padding and all) + Sec int64 + _ [4]byte // pad + Usec int32 +} + +type Tms struct { //clock_t is 4-byte unsigned int in zos + Utime uint32 + Stime uint32 + Cutime uint32 + Cstime uint32 +} + +type Time_t int64 + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [108]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + _ [112]uint8 // pad +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Iov *Iovec + Control *byte + Flags int32 + Namelen int32 + Iovlen int32 + Controllen int32 +} + +type Cmsghdr struct { + Len int32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Addr [4]byte /* in_addr */ + Ifindex uint32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + 
Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +type _Gid_t uint32 + +type rusage_zos struct { + Utime timeval_zos + Stime timeval_zos +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +// { int, short, short } in poll.h +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +type Stat_t struct { //Linux Definition + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int64 + Blocks int64 + _ [3]int64 +} + +type Stat_LE_t struct { + _ [4]byte // eye catcher + Length uint16 + Version uint16 + Mode int32 + Ino uint32 + Dev uint32 + Nlink int32 + Uid int32 + Gid int32 + Size int64 + Atim31 [4]byte + Mtim31 [4]byte + Ctim31 [4]byte + Rdev uint32 + Auditoraudit uint32 + Useraudit uint32 + Blksize int32 + Creatim31 [4]byte + AuditID [16]byte + _ [4]byte // rsrvd1 + File_tag struct { + Ccsid uint16 + Txtflag uint16 // aggregating Txflag:1 deferred:1 rsvflags:14 + } + CharsetID [8]byte + Blocks int64 + Genvalue uint32 + Reftim31 [4]byte + Fid [8]byte + Filefmt byte + Fspflag2 byte + _ [2]byte // rsrvd2 + Ctimemsec int32 + Seclabel [8]byte + _ [4]byte // rsrvd3 + _ [4]byte // rsrvd4 + Atim Time_t + Mtim Time_t + Ctim Time_t + Creatim Time_t + Reftim Time_t + _ [24]byte // rsrvd5 +} + +type Statvfs_t struct { + ID [4]byte + Len int32 + Bsize uint64 + Blocks uint64 + Usedspace uint64 + Bavail uint64 + Flag uint64 + Maxfilesize int64 + _ [16]byte + Frsize uint64 + Bfree uint64 + Files uint32 + Ffree uint32 + Favail uint32 + Namemax31 uint32 + Invarsec uint32 + _ [4]byte + Fsid uint64 + Namemax uint64 +} + +type Statfs_t struct { + Type uint32 + Bsize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint32 + Ffree uint32 + Fsid uint64 + Namelen uint64 + Frsize uint64 + Flags uint64 +} + +type Dirent struct { + Reclen uint16 + Namlen uint16 + Ino uint32 + Extra uintptr + Name [256]byte +} + +type FdSet struct { + Bits [64]int32 +} + +// This struct is packed on z/OS so it can't be used directly. 
+type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 +} + +type Termios struct { + Cflag uint32 + Iflag uint32 + Lflag uint32 + Oflag uint32 + Cc [11]uint8 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type W_Mnth struct { + Hid [4]byte + Size int32 + Cur1 int32 //32bit pointer + Cur2 int32 //^ + Devno uint32 + _ [4]byte +} + +type W_Mntent struct { + Fstype uint32 + Mode uint32 + Dev uint32 + Parentdev uint32 + Rootino uint32 + Status byte + Ddname [9]byte + Fstname [9]byte + Fsname [45]byte + Pathlen uint32 + Mountpoint [1024]byte + Jobname [8]byte + PID int32 + Parmoffset int32 + Parmlen int16 + Owner [8]byte + Quiesceowner [8]byte + _ [38]byte +} diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index 69309e4da5..fdbbbcd317 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 3606c3a8b3..7a11e83b7e 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -6,6 +6,13 @@ package windows +import ( + errorspkg "errors" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + // EscapeArg rewrites command line argument s as prescribed // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. @@ -73,6 +80,40 @@ func EscapeArg(s string) string { return string(qs[:j]) } +// ComposeCommandLine escapes and joins the given arguments suitable for use as a Windows command line, +// in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, +// or any program that uses CommandLineToArgv. +func ComposeCommandLine(args []string) string { + var commandLine string + for i := range args { + if i > 0 { + commandLine += " " + } + commandLine += EscapeArg(args[i]) + } + return commandLine +} + +// DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, +// as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that +// command lines are passed around. +func DecomposeCommandLine(commandLine string) ([]string, error) { + if len(commandLine) == 0 { + return []string{}, nil + } + var argc int32 + argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + if err != nil { + return nil, err + } + defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string + for _, v := range (*argv)[:argc] { + args = append(args, UTF16ToString((*v)[:])) + } + return args, nil +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } @@ -95,3 +136,60 @@ func FullPath(name string) (path string, err error) { } } } + +// NewProcThreadAttributeList allocates a new ProcThreadAttributeListContainer, with the requested maximum number of attributes. 
+func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListContainer, error) { + var size uintptr + err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size) + if err != ERROR_INSUFFICIENT_BUFFER { + if err == nil { + return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList") + } + return nil, err + } + // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. + al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0]))} + err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) + if err != nil { + return nil, err + } + return al, err +} + +// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. +// Note that the value passed to this function will be copied into memory +// allocated by LocalAlloc, the contents of which should not contain any +// Go-managed pointers, even if the passed value itself is a Go-managed +// pointer. +func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { + alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) + if err != nil { + return err + } + var src, dst []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&src)) + hdr.Data = value + hdr.Cap = int(size) + hdr.Len = int(size) + hdr = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + hdr.Data = unsafe.Pointer(alloc) + hdr.Cap = int(size) + hdr.Len = int(size) + copy(dst, src) + al.heapAllocations = append(al.heapAllocations, alloc) + return updateProcThreadAttribute(al.data, 0, attribute, unsafe.Pointer(alloc), size, nil, nil) +} + +// Delete frees ProcThreadAttributeList's resources. +func (al *ProcThreadAttributeListContainer) Delete() { + deleteProcThreadAttributeList(al.data) + for i := range al.heapAllocations { + LocalFree(Handle(al.heapAllocations[i])) + } + al.heapAllocations = nil +} + +// List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. 
+func (al *ProcThreadAttributeListContainer) List() *ProcThreadAttributeList { + return al.data +} diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index 2163843a11..58e0188fb7 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -9,6 +9,8 @@ shopt -s nullglob winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" [[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } +ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" +[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } declare -A errors @@ -59,5 +61,10 @@ declare -A errors echo "$key $vtype = $value" done < "$winerror" + while read -r line; do + [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue + echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" + done < "$ntstatus" + echo ")" } | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 69eb462c59..111c10d3a7 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -908,6 +908,19 @@ type SECURITY_DESCRIPTOR struct { dacl *ACL } +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 + ContextTrackingMode byte + EffectiveOnly byte +} + +// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. +const ( + SECURITY_STATIC_TRACKING = 0 + SECURITY_DYNAMIC_TRACKING = 1 +) + type SecurityAttributes struct { Length uint32 SecurityDescriptor *SECURITY_DESCRIPTOR @@ -1321,7 +1334,11 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT } func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdLen := (int)(selfRelativeSD.Length()) + sdLen := int(selfRelativeSD.Length()) + const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) + if sdLen < min { + sdLen = min + } var src []byte h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) @@ -1329,7 +1346,15 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() h.Len = sdLen h.Cap = sdLen - dst := make([]byte, sdLen) + const psize = int(unsafe.Sizeof(uintptr(0))) + + var dst []byte + h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + alloc := make([]uintptr, (sdLen+psize-1)/psize) + h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data + h.Len = sdLen + h.Cap = sdLen + copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 0197df8727..1215b2ae20 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -8,6 +8,8 @@ package windows import ( errorspkg "errors" + "fmt" + "runtime" "sync" "syscall" "time" @@ -65,9 +67,8 @@ const ( LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC + // Return value of SleepEx and other APC functions + WAIT_IO_COMPLETION = 0x000000C0 ) // StringToUTF16 is deprecated. Use UTF16FromString instead. 
@@ -180,6 +181,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process //sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW +//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) +//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) @@ -208,12 +214,16 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] -//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) -//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) -//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW +//sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList +//sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList +//sys 
updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId @@ -248,13 +258,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW //sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) //sys FlushFileBuffers(handle Handle) (err error) //sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW //sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW //sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW //sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW //sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) //sys UnmapViewOfFile(addr uintptr) (err error) //sys FlushViewOfFile(addr uintptr, length uintptr) (err error) @@ -283,6 +294,9 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy //sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW //sys CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension +//sys CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore +//sys CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore +//sys CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, 
callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey //sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject //sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject //sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData @@ -312,14 +326,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW //sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW //sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW //sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW //sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW //sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex //sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx @@ -334,10 +348,13 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, 
err error) +//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -367,16 +384,36 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx +//sys CoUninitialize() = ole32.CoUninitialize +//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject //sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages //sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages //sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages //sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW +//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource +//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource +//sys LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource // Process Status API (PSAPI) //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +// NT Native APIs +//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb +//sys RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString +//sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString +//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, 
allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile +//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus +//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus +//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl +//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess +//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -770,6 +807,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -783,6 +821,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo //sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes //sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar @@ 
-1505,3 +1544,129 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } + +func (s NTStatus) Errno() syscall.Errno { + return rtlNtStatusToDosErrorNoTeb(s) +} + +func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } + +func (s NTStatus) Error() string { + b := make([]uint16, 300) + n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) + if err != nil { + return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + var u NTUnicodeString + s16, err := UTF16PtrFromString(s) + if err != nil { + return nil, err + } + RtlInitUnicodeString(&u, s16) + return &u, nil +} + +// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. +func (s *NTUnicodeString) Slice() []uint16 { + var slice []uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTUnicodeString) String() string { + return UTF16ToString(s.Slice()) +} + +// NewNTString returns a new NTString structure for use with native +// NT APIs that work over the NTString type. Note that most Windows APIs +// do not use NTString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTString(s string) (*NTString, error) { + var nts NTString + s8, err := BytePtrFromString(s) + if err != nil { + return nil, err + } + RtlInitString(&nts, s8) + return &nts, nil +} + +// Slice returns a byte slice that aliases the data in the NTString. +func (s *NTString) Slice() []byte { + var slice []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTString) String() string { + return ByteSliceToString(s.Slice()) +} + +// FindResource resolves a resource of the given name and resource type. 
+func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { + var namePtr, resTypePtr uintptr + var name16, resType16 *uint16 + var err error + resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { + switch v := i.(type) { + case string: + *keep, err = UTF16PtrFromString(v) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(*keep)), nil + case ResourceID: + return uintptr(v), nil + } + return 0, errorspkg.New("parameter must be a ResourceID or a string") + } + namePtr, err = resolvePtr(name, &name16) + if err != nil { + return 0, err + } + resTypePtr, err = resolvePtr(resType, &resType16) + if err != nil { + return 0, err + } + resInfo, err := findResource(module, namePtr, resTypePtr) + runtime.KeepAlive(name16) + runtime.KeepAlive(resType16) + return resInfo, err +} + +func LoadResourceData(module, resInfo Handle) (data []byte, err error) { + size, err := SizeofResource(module, resInfo) + if err != nil { + return + } + resData, err := LoadResource(module, resInfo) + if err != nil { + return + } + ptr, err := LockResource(resData) + if err != nil { + return + } + h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) + h.Data = unsafe.Pointer(ptr) + h.Len = int(size) + h.Cap = int(size) + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index fd4260762a..1f733398ee 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -10,6 +10,10 @@ import ( "unsafe" ) +// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and +// other native functions. +type NTStatus uint32 + const ( // Invented values to support what package os expects. O_RDONLY = 0x00000 @@ -215,6 +219,18 @@ const ( INHERIT_PARENT_AFFINITY = 0x00010000 ) +const ( + // attributes for ProcThreadAttributeList + PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000 + PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002 + PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY = 0x00030003 + PROC_THREAD_ATTRIBUTE_PREFERRED_NODE = 0x00020004 + PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR = 0x00030005 + PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 + PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 + PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b +) + const ( // flags for CreateToolhelp32Snapshot TH32CS_SNAPHEAPLIST = 0x01 @@ -287,6 +303,23 @@ const ( PKCS12_NO_PERSIST_KEY = 0x00008000 PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010 + /* Flags for CryptAcquireCertificatePrivateKey */ + CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001 + CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002 + CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004 + CRYPT_ACQUIRE_NO_HEALING = 0x00000008 + CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040 + CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG = 0x00000080 + CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK = 0x00070000 + CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG = 0x00010000 + CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000 + CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG = 0x00040000 + + /* pdwKeySpec for CryptAcquireCertificatePrivateKey */ + AT_KEYEXCHANGE = 1 + AT_SIGNATURE = 2 + CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF + /* Default usage match type is AND with value zero */ USAGE_MATCH_TYPE_AND = 0 USAGE_MATCH_TYPE_OR = 1 @@ -412,6 +445,89 @@ const ( CERT_TRUST_IS_CA_TRUSTED = 0x00004000 CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 + /* Certificate Information Flags */ + CERT_INFO_VERSION_FLAG = 1 + CERT_INFO_SERIAL_NUMBER_FLAG = 2 + CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3 + CERT_INFO_ISSUER_FLAG = 4 + 
CERT_INFO_NOT_BEFORE_FLAG = 5 + CERT_INFO_NOT_AFTER_FLAG = 6 + CERT_INFO_SUBJECT_FLAG = 7 + CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8 + CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9 + CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10 + CERT_INFO_EXTENSION_FLAG = 11 + + /* dwFindType for CertFindCertificateInStore */ + CERT_COMPARE_MASK = 0xFFFF + CERT_COMPARE_SHIFT = 16 + CERT_COMPARE_ANY = 0 + CERT_COMPARE_SHA1_HASH = 1 + CERT_COMPARE_NAME = 2 + CERT_COMPARE_ATTR = 3 + CERT_COMPARE_MD5_HASH = 4 + CERT_COMPARE_PROPERTY = 5 + CERT_COMPARE_PUBLIC_KEY = 6 + CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH + CERT_COMPARE_NAME_STR_A = 7 + CERT_COMPARE_NAME_STR_W = 8 + CERT_COMPARE_KEY_SPEC = 9 + CERT_COMPARE_ENHKEY_USAGE = 10 + CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE + CERT_COMPARE_SUBJECT_CERT = 11 + CERT_COMPARE_ISSUER_OF = 12 + CERT_COMPARE_EXISTING = 13 + CERT_COMPARE_SIGNATURE_HASH = 14 + CERT_COMPARE_KEY_IDENTIFIER = 15 + CERT_COMPARE_CERT_ID = 16 + CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17 + CERT_COMPARE_PUBKEY_MD5_HASH = 18 + CERT_COMPARE_SUBJECT_INFO_ACCESS = 19 + CERT_COMPARE_HASH_STR = 20 + CERT_COMPARE_HAS_PRIVATE_KEY = 21 + CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT) + CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT) + CERT_FIND_HASH = CERT_FIND_SHA1_HASH + CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT) + CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT) + CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME<= nbuf { bufp = 0 - nbuf, err = syscall.ReadDirent(fd, buf) + nbuf, err = readDirent(fd, buf) if err != nil { return os.NewSyscallError("readdirent", err) } @@ -126,3 +127,27 @@ func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { } return } + +// According to https://golang.org/doc/go1.14#runtime +// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS +// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases. +// +// This causes syscall.Open and syscall.ReadDirent sometimes fail with EINTR errors. +// We need to retry in this case. +func open(path string, mode int, perm uint32) (fd int, err error) { + for { + fd, err := syscall.Open(path, mode, perm) + if err != syscall.EINTR { + return fd, err + } + } +} + +func readDirent(fd int, buf []byte) (n int, err error) { + for { + nbuf, err := syscall.ReadDirent(fd, buf) + if err != syscall.EINTR { + return nbuf, err + } + } +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go index 1cd8d8473e..5e75bd6d8f 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/vendor.go +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -12,6 +12,7 @@ import ( "path/filepath" "regexp" "strings" + "time" "golang.org/x/mod/semver" ) @@ -19,11 +20,15 @@ import ( // ModuleJSON holds information about a module. type ModuleJSON struct { Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) Main bool // is this the main module? 
Indirect bool // is this module only an indirect dependency of main module? Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file for this module, if any + GoMod string // path to go.mod file used when loading this module, if any GoVersion string // go version used in module } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 0cebac6e66..7130436802 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -14,7 +14,7 @@ import ( // It returns the X in Go 1.X. func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" - inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`} + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") // Unset any unneeded flags, and remove them from BuildFlags, if they're // present. diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 65e0b94b17..dff6d55362 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -89,8 +89,10 @@ func (r *ModuleResolver) init() error { err := r.initAllMods() // We expect an error when running outside of a module with // GO111MODULE=on. Other errors are fatal. - if err != nil && !strings.Contains(err.Error(), "working directory is not part of a module") { - return err + if err != nil { + if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { + return err + } } } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 7b573b9830..ccdd4e0ffc 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -974,13 +974,29 @@ var stdlib = map[string][]string{ "DF_STATIC_TLS", "DF_SYMBOLIC", "DF_TEXTREL", + "DT_ADDRRNGHI", + "DT_ADDRRNGLO", + "DT_AUDIT", + "DT_AUXILIARY", "DT_BIND_NOW", + "DT_CHECKSUM", + "DT_CONFIG", "DT_DEBUG", + "DT_DEPAUDIT", "DT_ENCODING", + "DT_FEATURE", + "DT_FILTER", "DT_FINI", "DT_FINI_ARRAY", "DT_FINI_ARRAYSZ", "DT_FLAGS", + "DT_FLAGS_1", + "DT_GNU_CONFLICT", + "DT_GNU_CONFLICTSZ", + "DT_GNU_HASH", + "DT_GNU_LIBLIST", + "DT_GNU_LIBLISTSZ", + "DT_GNU_PRELINKED", "DT_HASH", "DT_HIOS", "DT_HIPROC", @@ -990,28 +1006,100 @@ var stdlib = map[string][]string{ "DT_JMPREL", "DT_LOOS", "DT_LOPROC", + "DT_MIPS_AUX_DYNAMIC", + "DT_MIPS_BASE_ADDRESS", + "DT_MIPS_COMPACT_SIZE", + "DT_MIPS_CONFLICT", + "DT_MIPS_CONFLICTNO", + "DT_MIPS_CXX_FLAGS", + "DT_MIPS_DELTA_CLASS", + "DT_MIPS_DELTA_CLASSSYM", + "DT_MIPS_DELTA_CLASSSYM_NO", + "DT_MIPS_DELTA_CLASS_NO", + "DT_MIPS_DELTA_INSTANCE", + "DT_MIPS_DELTA_INSTANCE_NO", + "DT_MIPS_DELTA_RELOC", + "DT_MIPS_DELTA_RELOC_NO", + "DT_MIPS_DELTA_SYM", + "DT_MIPS_DELTA_SYM_NO", + "DT_MIPS_DYNSTR_ALIGN", + "DT_MIPS_FLAGS", + "DT_MIPS_GOTSYM", + "DT_MIPS_GP_VALUE", + "DT_MIPS_HIDDEN_GOTIDX", + "DT_MIPS_HIPAGENO", + "DT_MIPS_ICHECKSUM", + "DT_MIPS_INTERFACE", + "DT_MIPS_INTERFACE_SIZE", + "DT_MIPS_IVERSION", + "DT_MIPS_LIBLIST", + "DT_MIPS_LIBLISTNO", + "DT_MIPS_LOCALPAGE_GOTIDX", + "DT_MIPS_LOCAL_GOTIDX", + "DT_MIPS_LOCAL_GOTNO", + "DT_MIPS_MSYM", + "DT_MIPS_OPTIONS", + "DT_MIPS_PERF_SUFFIX", + 
"DT_MIPS_PIXIE_INIT", + "DT_MIPS_PLTGOT", + "DT_MIPS_PROTECTED_GOTIDX", + "DT_MIPS_RLD_MAP", + "DT_MIPS_RLD_MAP_REL", + "DT_MIPS_RLD_TEXT_RESOLVE_ADDR", + "DT_MIPS_RLD_VERSION", + "DT_MIPS_RWPLT", + "DT_MIPS_SYMBOL_LIB", + "DT_MIPS_SYMTABNO", + "DT_MIPS_TIME_STAMP", + "DT_MIPS_UNREFEXTNO", + "DT_MOVEENT", + "DT_MOVESZ", + "DT_MOVETAB", "DT_NEEDED", "DT_NULL", "DT_PLTGOT", + "DT_PLTPAD", + "DT_PLTPADSZ", "DT_PLTREL", "DT_PLTRELSZ", + "DT_POSFLAG_1", + "DT_PPC64_GLINK", + "DT_PPC64_OPD", + "DT_PPC64_OPDSZ", + "DT_PPC64_OPT", + "DT_PPC_GOT", + "DT_PPC_OPT", "DT_PREINIT_ARRAY", "DT_PREINIT_ARRAYSZ", "DT_REL", "DT_RELA", + "DT_RELACOUNT", "DT_RELAENT", "DT_RELASZ", + "DT_RELCOUNT", "DT_RELENT", "DT_RELSZ", "DT_RPATH", "DT_RUNPATH", "DT_SONAME", + "DT_SPARC_REGISTER", "DT_STRSZ", "DT_STRTAB", "DT_SYMBOLIC", "DT_SYMENT", + "DT_SYMINENT", + "DT_SYMINFO", + "DT_SYMINSZ", "DT_SYMTAB", + "DT_SYMTAB_SHNDX", "DT_TEXTREL", + "DT_TLSDESC_GOT", + "DT_TLSDESC_PLT", + "DT_USED", + "DT_VALRNGHI", + "DT_VALRNGLO", + "DT_VERDEF", + "DT_VERDEFNUM", "DT_VERNEED", "DT_VERNEEDNUM", "DT_VERSYM", @@ -1271,17 +1359,38 @@ var stdlib = map[string][]string{ "PF_R", "PF_W", "PF_X", + "PT_AARCH64_ARCHEXT", + "PT_AARCH64_UNWIND", + "PT_ARM_ARCHEXT", + "PT_ARM_EXIDX", "PT_DYNAMIC", + "PT_GNU_EH_FRAME", + "PT_GNU_MBIND_HI", + "PT_GNU_MBIND_LO", + "PT_GNU_PROPERTY", + "PT_GNU_RELRO", + "PT_GNU_STACK", "PT_HIOS", "PT_HIPROC", "PT_INTERP", "PT_LOAD", "PT_LOOS", "PT_LOPROC", + "PT_MIPS_ABIFLAGS", + "PT_MIPS_OPTIONS", + "PT_MIPS_REGINFO", + "PT_MIPS_RTPROC", "PT_NOTE", "PT_NULL", + "PT_OPENBSD_BOOTDATA", + "PT_OPENBSD_RANDOMIZE", + "PT_OPENBSD_WXNEEDED", + "PT_PAX_FLAGS", "PT_PHDR", + "PT_S390_PGSTE", "PT_SHLIB", + "PT_SUNWSTACK", + "PT_SUNW_EH_FRAME", "PT_TLS", "Prog", "Prog32", @@ -2445,6 +2554,9 @@ var stdlib = map[string][]string{ "SectionHeader", "Sym", }, + "embed": []string{ + "FS", + }, "encoding": []string{ "BinaryMarshaler", "BinaryUnmarshaler", @@ -2680,6 +2792,7 @@ var stdlib = map[string][]string{ "FlagSet", "Float64", "Float64Var", + "Func", "Getter", "Int", "Int64", @@ -2853,6 +2966,18 @@ var stdlib = map[string][]string{ "Package", "ToolDir", }, + "go/build/constraint": []string{ + "AndExpr", + "Expr", + "IsGoBuild", + "IsPlusBuild", + "NotExpr", + "OrExpr", + "Parse", + "PlusBuildLines", + "SyntaxError", + "TagExpr", + }, "go/constant": []string{ "BinaryOp", "BitLen", @@ -3273,6 +3398,7 @@ var stdlib = map[string][]string{ "Must", "New", "OK", + "ParseFS", "ParseFiles", "ParseGlob", "Srcset", @@ -3432,6 +3558,7 @@ var stdlib = map[string][]string{ "Copy", "CopyBuffer", "CopyN", + "Discard", "EOF", "ErrClosedPipe", "ErrNoProgress", @@ -3443,12 +3570,15 @@ var stdlib = map[string][]string{ "MultiReader", "MultiWriter", "NewSectionReader", + "NopCloser", "Pipe", "PipeReader", "PipeWriter", + "ReadAll", "ReadAtLeast", "ReadCloser", "ReadFull", + "ReadSeekCloser", "ReadSeeker", "ReadWriteCloser", "ReadWriteSeeker", @@ -3472,6 +3602,49 @@ var stdlib = map[string][]string{ "WriterAt", "WriterTo", }, + "io/fs": []string{ + "DirEntry", + "ErrClosed", + "ErrExist", + "ErrInvalid", + "ErrNotExist", + "ErrPermission", + "FS", + "File", + "FileInfo", + "FileMode", + "Glob", + "GlobFS", + "ModeAppend", + "ModeCharDevice", + "ModeDevice", + "ModeDir", + "ModeExclusive", + "ModeIrregular", + "ModeNamedPipe", + "ModePerm", + "ModeSetgid", + "ModeSetuid", + "ModeSocket", + "ModeSticky", + "ModeSymlink", + "ModeTemporary", + "ModeType", + "PathError", + "ReadDir", + "ReadDirFS", + "ReadDirFile", + "ReadFile", + "ReadFileFS", + 
"SkipDir", + "Stat", + "StatFS", + "Sub", + "SubFS", + "ValidPath", + "WalkDir", + "WalkDirFunc", + }, "io/ioutil": []string{ "Discard", "NopCloser", @@ -3483,6 +3656,7 @@ var stdlib = map[string][]string{ "WriteFile", }, "log": []string{ + "Default", "Fatal", "Fatalf", "Fatalln", @@ -3819,6 +3993,7 @@ var stdlib = map[string][]string{ "DialUDP", "DialUnix", "Dialer", + "ErrClosed", "ErrWriteToConnected", "Error", "FileConn", @@ -3938,6 +4113,7 @@ var stdlib = map[string][]string{ "ErrUseLastResponse", "ErrWriteAfterFlush", "Error", + "FS", "File", "FileServer", "FileSystem", @@ -4234,7 +4410,10 @@ var stdlib = map[string][]string{ "Chtimes", "Clearenv", "Create", + "CreateTemp", "DevNull", + "DirEntry", + "DirFS", "Environ", "ErrClosed", "ErrDeadlineExceeded", @@ -4243,6 +4422,7 @@ var stdlib = map[string][]string{ "ErrNoDeadline", "ErrNotExist", "ErrPermission", + "ErrProcessDone", "Executable", "Exit", "Expand", @@ -4276,6 +4456,7 @@ var stdlib = map[string][]string{ "Lstat", "Mkdir", "MkdirAll", + "MkdirTemp", "ModeAppend", "ModeCharDevice", "ModeDevice", @@ -4310,6 +4491,8 @@ var stdlib = map[string][]string{ "ProcAttr", "Process", "ProcessState", + "ReadDir", + "ReadFile", "Readlink", "Remove", "RemoveAll", @@ -4333,6 +4516,7 @@ var stdlib = map[string][]string{ "UserCacheDir", "UserConfigDir", "UserHomeDir", + "WriteFile", }, "os/exec": []string{ "Cmd", @@ -4347,6 +4531,7 @@ var stdlib = map[string][]string{ "Ignore", "Ignored", "Notify", + "NotifyContext", "Reset", "Stop", }, @@ -4397,6 +4582,7 @@ var stdlib = map[string][]string{ "ToSlash", "VolumeName", "Walk", + "WalkDir", "WalkFunc", }, "plugin": []string{ @@ -4629,6 +4815,19 @@ var stdlib = map[string][]string{ "Stack", "WriteHeapDump", }, + "runtime/metrics": []string{ + "All", + "Description", + "Float64Histogram", + "KindBad", + "KindFloat64", + "KindFloat64Histogram", + "KindUint64", + "Read", + "Sample", + "Value", + "ValueKind", + }, "runtime/pprof": []string{ "Do", "ForLabels", @@ -5012,6 +5211,8 @@ var stdlib = map[string][]string{ "AddrinfoW", "Adjtime", "Adjtimex", + "AllThreadsSyscall", + "AllThreadsSyscall6", "AttachLsf", "B0", "B1000000", @@ -10010,13 +10211,20 @@ var stdlib = map[string][]string{ "TB", "Verbose", }, + "testing/fstest": []string{ + "MapFS", + "MapFile", + "TestFS", + }, "testing/iotest": []string{ "DataErrReader", + "ErrReader", "ErrTimeout", "HalfReader", "NewReadLogger", "NewWriteLogger", "OneByteReader", + "TestReader", "TimeoutReader", "TruncateWriter", }, @@ -10076,6 +10284,7 @@ var stdlib = map[string][]string{ "JSEscaper", "Must", "New", + "ParseFS", "ParseFiles", "ParseGlob", "Template", @@ -10087,12 +10296,14 @@ var stdlib = map[string][]string{ "BranchNode", "ChainNode", "CommandNode", + "CommentNode", "DotNode", "FieldNode", "IdentifierNode", "IfNode", "IsEmptyTree", "ListNode", + "Mode", "New", "NewIdentifier", "NilNode", @@ -10101,6 +10312,7 @@ var stdlib = map[string][]string{ "NodeBool", "NodeChain", "NodeCommand", + "NodeComment", "NodeDot", "NodeField", "NodeIdentifier", @@ -10118,6 +10330,7 @@ var stdlib = map[string][]string{ "NodeWith", "NumberNode", "Parse", + "ParseComments", "PipeNode", "Pos", "RangeNode", @@ -10230,6 +10443,7 @@ var stdlib = map[string][]string{ "Chakma", "Cham", "Cherokee", + "Chorasmian", "Co", "Common", "Coptic", @@ -10243,6 +10457,7 @@ var stdlib = map[string][]string{ "Devanagari", "Diacritic", "Digit", + "Dives_Akuru", "Dogra", "Duployan", "Egyptian_Hieroglyphs", @@ -10300,6 +10515,7 @@ var stdlib = map[string][]string{ "Katakana", "Kayah_Li", 
"Kharoshthi", + "Khitan_Small_Script", "Khmer", "Khojki", "Khudawadi", @@ -10472,6 +10688,7 @@ var stdlib = map[string][]string{ "Wancho", "Warang_Citi", "White_Space", + "Yezidi", "Yi", "Z", "Zanabazar_Square", diff --git a/vendor/golang.org/x/tools/internal/typeparams/doc.go b/vendor/golang.org/x/tools/internal/typeparams/doc.go new file mode 100644 index 0000000000..5583947e21 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/doc.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams provides functions to work indirectly with type parameter +// data stored in go/ast and go/types objects, while these API are guarded by a +// build constraint. +// +// This package exists to make it easier for tools to work with generic code, +// while also compiling against older Go versions. +package typeparams diff --git a/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go b/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go new file mode 100644 index 0000000000..3a0abc7c18 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go @@ -0,0 +1,90 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !typeparams || !go1.17 +// +build !typeparams !go1.17 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// NOTE: doc comments must be kept in sync with typeparams.go. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false + +// UnpackIndex extracts all index expressions from e. For non-generic code this +// is always one expression: e.Index, but may be more than one expression for +// generic type instantiation. +func UnpackIndex(e *ast.IndexExpr) []ast.Expr { + return []ast.Expr{e.Index} +} + +// IsListExpr reports whether n is an *ast.ListExpr, which is a new node type +// introduced to hold type arguments for generic type instantiation. +func IsListExpr(n ast.Node) bool { + return false +} + +// ForTypeDecl extracts the (possibly nil) type parameter node list from n. +func ForTypeDecl(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncDecl extracts the (possibly nil) type parameter node list from n. +func ForFuncDecl(*ast.FuncDecl) *ast.FieldList { + return nil +} + +// ForSignature extracts the (possibly empty) type parameter object list from +// sig. +func ForSignature(*types.Signature) []*types.TypeName { + return nil +} + +// HasTypeSet reports if iface has a type set. +func HasTypeSet(*types.Interface) bool { + return false +} + +// IsComparable reports if iface is the comparable interface. +func IsComparable(*types.Interface) bool { + return false +} + +// IsConstraint reports whether iface may only be used as a type parameter +// constraint (i.e. has a type set or is the comparable interface). +func IsConstraint(*types.Interface) bool { + return false +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(*types.Named) []*types.TypeName { + return nil +} + +// NamedTArgs extracts the (possibly empty) type argument list from named. +func NamedTArgs(*types.Named) []types.Type { + return nil +} + +// InitInferred initializes info to record inferred type information. 
+func InitInferred(*types.Info) { +} + +// GetInferred extracts inferred type information from info for e. +// +// The expression e may have an inferred type if it is an *ast.IndexExpr +// representing partial instantiation of a generic function type for which type +// arguments have been inferred using constraint type inference, or if it is an +// *ast.CallExpr for which type type arguments have be inferred using both +// constraint type inference and function argument inference. +func GetInferred(*types.Info, ast.Expr) ([]types.Type, *types.Signature) { + return nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams.go new file mode 100644 index 0000000000..6b7958af06 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build typeparams && go1.17 +// +build typeparams,go1.17 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// NOTE: doc comments must be kept in sync with notypeparams.go. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = true + +// UnpackIndex extracts all index expressions from e. For non-generic code this +// is always one expression: e.Index, but may be more than one expression for +// generic type instantiation. +func UnpackIndex(e *ast.IndexExpr) []ast.Expr { + if x, _ := e.Index.(*ast.ListExpr); x != nil { + return x.ElemList + } + if e.Index != nil { + return []ast.Expr{e.Index} + } + return nil +} + +// IsListExpr reports whether n is an *ast.ListExpr, which is a new node type +// introduced to hold type arguments for generic type instantiation. +func IsListExpr(n ast.Node) bool { + _, ok := n.(*ast.ListExpr) + return ok +} + +// ForTypeDecl extracts the (possibly nil) type parameter node list from n. +func ForTypeDecl(n *ast.TypeSpec) *ast.FieldList { + return n.TParams +} + +// ForFuncDecl extracts the (possibly nil) type parameter node list from n. +func ForFuncDecl(n *ast.FuncDecl) *ast.FieldList { + if n.Type != nil { + return n.Type.TParams + } + return nil +} + +// ForSignature extracts the (possibly empty) type parameter object list from +// sig. +func ForSignature(sig *types.Signature) []*types.TypeName { + return sig.TParams() +} + +// HasTypeSet reports if iface has a type set. +func HasTypeSet(iface *types.Interface) bool { + return iface.HasTypeList() +} + +// IsComparable reports if iface is the comparable interface. +func IsComparable(iface *types.Interface) bool { + return iface.IsComparable() +} + +// IsConstraint reports whether iface may only be used as a type parameter +// constraint (i.e. has a type set or is the comparable interface). +func IsConstraint(iface *types.Interface) bool { + return iface.IsConstraint() +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(named *types.Named) []*types.TypeName { + return named.TParams() +} + +// NamedTArgs extracts the (possibly empty) type argument list from named. +func NamedTArgs(named *types.Named) []types.Type { + return named.TArgs() +} + +// InitInferred initializes info to record inferred type information. +func InitInferred(info *types.Info) { + info.Inferred = make(map[ast.Expr]types.Inferred) +} + +// GetInferred extracts inferred type information from info for e. 
+// +// The expression e may have an inferred type if it is an *ast.IndexExpr +// representing partial instantiation of a generic function type for which type +// arguments have been inferred using constraint type inference, or if it is an +// *ast.CallExpr for which type type arguments have be inferred using both +// constraint type inference and function argument inference. +func GetInferred(info *types.Info, e ast.Expr) ([]types.Type, *types.Signature) { + if info.Inferred == nil { + return nil, nil + } + inf := info.Inferred[e] + return inf.TArgs, inf.Sig +} diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.mod b/vendor/gomodules.xyz/jsonpatch/v2/go.mod index b5eaf830eb..0395a5805e 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/go.mod +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.mod @@ -3,7 +3,6 @@ module gomodules.xyz/jsonpatch/v2 go 1.12 require ( - github.com/evanphx/json-patch v4.5.0+incompatible - github.com/pkg/errors v0.8.1 // indirect + github.com/evanphx/json-patch v0.5.2 github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.sum b/vendor/gomodules.xyz/jsonpatch/v2/go.sum index d8f9ffe1c9..d931385bc1 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/go.sum +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.sum @@ -1,9 +1,10 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index b8ae4456d2..0ffb315604 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -191,8 +191,6 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, if at == nil && bt == nil { // do nothing return patch, nil - } else if at == nil && bt != nil { - return append(patch, NewOperation("add", p, bv)), nil } else if at != bt { // If types have changed, replace completely (preserves null in destination) return append(patch, NewOperation("replace", p, bv)), nil diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go deleted file mode 100644 index fedcce15b4..0000000000 --- a/vendor/google.golang.org/api/internal/conn_pool.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 Google LLC. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "google.golang.org/grpc" -) - -// ConnPool is a pool of grpc.ClientConns. -type ConnPool interface { - // Conn returns a ClientConn from the pool. - // - // Conns aren't returned to the pool. - Conn() *grpc.ClientConn - - // Num returns the number of connections in the pool. - // - // It will always return the same value. - Num() int - - // Close closes every ClientConn in the pool. - // - // The error returned by Close may be a single error or multiple errors. - Close() error - - // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. - grpc.ClientConnInterface -} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go deleted file mode 100644 index c93daa98c3..0000000000 --- a/vendor/google.golang.org/api/internal/creds.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - - "golang.org/x/oauth2" - "google.golang.org/api/internal/impersonate" - - "golang.org/x/oauth2/google" -) - -// Creds returns credential information obtained from DialSettings, or if none, then -// it returns default credential information. -func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { - creds, err := baseCreds(ctx, ds) - if err != nil { - return nil, err - } - if ds.ImpersonationConfig != nil { - return impersonateCredentials(ctx, creds, ds) - } - return creds, nil -} - -func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { - if ds.Credentials != nil { - return ds.Credentials, nil - } - if ds.CredentialsJSON != nil { - return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) - } - if ds.CredentialsFile != "" { - data, err := ioutil.ReadFile(ds.CredentialsFile) - if err != nil { - return nil, fmt.Errorf("cannot read credentials file: %v", err) - } - return credentialsFromJSON(ctx, data, ds) - } - if ds.TokenSource != nil { - return &google.Credentials{TokenSource: ds.TokenSource}, nil - } - cred, err := google.FindDefaultCredentials(ctx, ds.GetScopes()...) - if err != nil { - return nil, err - } - if len(cred.JSON) > 0 { - return credentialsFromJSON(ctx, cred.JSON, ds) - } - // For GAE and GCE, the JSON is empty so return the default credentials directly. - return cred, nil -} - -// JSON key file type. -const ( - serviceAccountKey = "service_account" -) - -// credentialsFromJSON returns a google.Credentials based on the input. -// -// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow -// - Otherwise, returns OAuth 2.0 flow. -func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { - cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) - if err != nil { - return nil, err - } - if len(data) > 0 && len(ds.Scopes) == 0 && (ds.DefaultAudience != "" || len(ds.Audiences) > 0) { - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. 
- } - if err := json.Unmarshal(cred.JSON, &f); err != nil { - return nil, err - } - if f.Type == serviceAccountKey { - ts, err := selfSignedJWTTokenSource(data, ds.DefaultAudience, ds.Audiences) - if err != nil { - return nil, err - } - cred.TokenSource = ts - } - } - return cred, err -} - -func selfSignedJWTTokenSource(data []byte, defaultAudience string, audiences []string) (oauth2.TokenSource, error) { - audience := defaultAudience - if len(audiences) > 0 { - // TODO(shinfan): Update golang oauth to support multiple audiences. - if len(audiences) > 1 { - return nil, fmt.Errorf("multiple audiences support is not implemented") - } - audience = audiences[0] - } - return google.JWTAccessTokenSourceFromJSON(data, audience) -} - -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. -// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { - var v struct { - QuotaProject string `json:"quota_project_id"` - } - if err := json.Unmarshal(cred.JSON, &v); err != nil { - return "" - } - return v.QuotaProject -} - -func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { - if len(ds.ImpersonationConfig.Scopes) == 0 { - ds.ImpersonationConfig.Scopes = ds.GetScopes() - } - ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) - if err != nil { - return nil, err - } - return &google.Credentials{ - TokenSource: ts, - ProjectID: creds.ProjectID, - }, nil -} diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go deleted file mode 100644 index b465bbcd12..0000000000 --- a/vendor/google.golang.org/api/internal/impersonate/impersonate.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package impersonate is used to impersonate Google Credentials. -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "golang.org/x/oauth2" -) - -// Config for generating impersonated credentials. -type Config struct { - // Target is the service account to impersonate. Required. - Target string - // Scopes the impersonated credential should have. Required. - Scopes []string - // Delegates are the service accounts in a delegation chain. Each service - // account must be granted roles/iam.serviceAccountTokenCreator on the next - // service account in the chain. Optional. - Delegates []string -} - -// TokenSource returns an impersonated TokenSource configured with the provided -// config using ts as the base credential provider for making requests. -func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { - if len(config.Scopes) == 0 { - return nil, fmt.Errorf("impersonate: scopes must be provided") - } - its := impersonatedTokenSource{ - ctx: ctx, - ts: ts, - name: formatIAMServiceAccountName(config.Target), - // Default to the longest acceptable value of one hour as the token will - // be refreshed automatically. 
- lifetime: "3600s", - } - - its.delegates = make([]string, len(config.Delegates)) - for i, v := range config.Delegates { - its.delegates[i] = formatIAMServiceAccountName(v) - } - its.scopes = make([]string, len(config.Scopes)) - copy(its.scopes, config.Scopes) - - return oauth2.ReuseTokenSource(nil, its), nil -} - -func formatIAMServiceAccountName(name string) string { - return fmt.Sprintf("projects/-/serviceAccounts/%s", name) -} - -type generateAccessTokenReq struct { - Delegates []string `json:"delegates,omitempty"` - Lifetime string `json:"lifetime,omitempty"` - Scope []string `json:"scope,omitempty"` -} - -type generateAccessTokenResp struct { - AccessToken string `json:"accessToken"` - ExpireTime string `json:"expireTime"` -} - -type impersonatedTokenSource struct { - ctx context.Context - ts oauth2.TokenSource - - name string - lifetime string - scopes []string - delegates []string -} - -// Token returns an impersonated Token. -func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { - hc := oauth2.NewClient(i.ctx, i.ts) - reqBody := generateAccessTokenReq{ - Delegates: i.delegates, - Lifetime: i.lifetime, - Scope: i.scopes, - } - b, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) - } - url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %v", err) - } - req = req.WithContext(i.ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := hc.Do(req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var accessTokenResp generateAccessTokenResp - if err := json.Unmarshal(body, &accessTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) - } - expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) - } - return &oauth2.Token{ - AccessToken: accessTokenResp.AccessToken, - Expiry: expiry, - }, nil -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go deleted file mode 100644 index 0ae1cb9778..0000000000 --- a/vendor/google.golang.org/api/internal/settings.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal supports the options and transport packages. -package internal - -import ( - "crypto/tls" - "errors" - "net/http" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/api/internal/impersonate" - "google.golang.org/grpc" -) - -// DialSettings holds information needed to establish a connection with a -// Google API service. 
-type DialSettings struct { - Endpoint string - DefaultEndpoint string - DefaultMTLSEndpoint string - Scopes []string - DefaultScopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. - CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - DefaultAudience string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - GRPCConnPool ConnPool - GRPCConnPoolSize int - NoAuth bool - TelemetryDisabled bool - ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - CustomClaims map[string]interface{} - SkipValidation bool - ImpersonationConfig *impersonate.Config - EnableDirectPath bool - - // Google API system parameters. For more information please read: - // https://cloud.google.com/apis/docs/system-parameters - QuotaProject string - RequestReason string -} - -// GetScopes returns the user-provided scopes, if set, or else falls back to the -// default scopes. -func (ds *DialSettings) GetScopes() []string { - if len(ds.Scopes) > 0 { - return ds.Scopes - } - return ds.DefaultScopes -} - -// Validate reports an error if ds is invalid. -func (ds *DialSettings) Validate() error { - if ds.SkipValidation { - return nil - } - hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil - if ds.NoAuth && hasCreds { - return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") - } - // Credentials should not appear with other options. - // We currently allow TokenSource and CredentialsFile to coexist. - // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). - nCreds := 0 - if ds.Credentials != nil { - nCreds++ - } - if ds.CredentialsJSON != nil { - nCreds++ - } - if ds.CredentialsFile != "" { - nCreds++ - } - if ds.APIKey != "" { - nCreds++ - } - if ds.TokenSource != nil { - nCreds++ - } - if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { - return errors.New("WithScopes is incompatible with WithAudience") - } - // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. - if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { - return errors.New("multiple credential options provided") - } - if ds.GRPCConn != nil && ds.GRPCConnPool != nil { - return errors.New("WithGRPCConn is incompatible with WithConnPool") - } - if ds.HTTPClient != nil && ds.GRPCConnPool != nil { - return errors.New("WithHTTPClient is incompatible with WithConnPool") - } - if ds.HTTPClient != nil && ds.GRPCConn != nil { - return errors.New("WithHTTPClient is incompatible with WithGRPCConn") - } - if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { - return errors.New("WithHTTPClient is incompatible with gRPC dial options") - } - if ds.HTTPClient != nil && ds.QuotaProject != "" { - return errors.New("WithHTTPClient is incompatible with QuotaProject") - } - if ds.HTTPClient != nil && ds.RequestReason != "" { - return errors.New("WithHTTPClient is incompatible with RequestReason") - } - if ds.HTTPClient != nil && ds.ClientCertSource != nil { - return errors.New("WithHTTPClient is incompatible with WithClientCertSource") - } - if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { - return errors.New("WithClientCertSource is currently only supported for HTTP. 
gRPC settings are incompatible") - } - if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { - return errors.New("WithImpersonatedCredentials requires scopes being provided") - } - return nil -} diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go deleted file mode 100644 index 1799b5d9af..0000000000 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package iterator provides support for standard Google API iterators. -// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. -package iterator - -import ( - "errors" - "fmt" - "reflect" -) - -// Done is returned by an iterator's Next method when the iteration is -// complete; when there are no more items to return. -var Done = errors.New("no more items in iterator") - -// We don't support mixed calls to Next and NextPage because they play -// with the paging state in incompatible ways. -var errMixed = errors.New("iterator: Next and NextPage called on same iterator") - -// PageInfo contains information about an iterator's paging state. -type PageInfo struct { - // Token is the token used to retrieve the next page of items from the - // API. You may set Token immediately after creating an iterator to - // begin iteration at a particular point. If Token is the empty string, - // the iterator will begin with the first eligible item. - // - // The result of setting Token after the first call to Next is undefined. - // - // After the underlying API method is called to retrieve a page of items, - // Token is set to the next-page token in the response. - Token string - - // MaxSize is the maximum number of items returned by a call to the API. - // Set MaxSize as a hint to optimize the buffering behavior of the iterator. - // If zero, the page size is determined by the underlying service. - // - // Use Pager to retrieve a page of a specific, exact size. - MaxSize int - - // The error state of the iterator. Manipulated by PageInfo.next and Pager. - // This is a latch: it starts as nil, and once set should never change. - err error - - // If true, no more calls to fetch should be made. Set to true when fetch - // returns an empty page token. The iterator is Done when this is true AND - // the buffer is empty. - atEnd bool - - // Function that fetches a page from the underlying service. It should pass - // the pageSize and pageToken arguments to the service, fill the buffer - // with the results from the call, and return the next-page token returned - // by the service. The function must not remove any existing items from the - // buffer. If the underlying RPC takes an int32 page size, pageSize should - // be silently truncated. - fetch func(pageSize int, pageToken string) (nextPageToken string, err error) - - // Function that returns the number of currently buffered items. - bufLen func() int - - // Function that returns the buffer, after setting the buffer variable to nil. - takeBuf func() interface{} - - // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check - // for calls to both Next and NextPage with the same iterator. - nextCalled, nextPageCalled bool -} - -// NewPageInfo exposes internals for iterator implementations. -// It is not a stable interface. 
-var NewPageInfo = newPageInfo - -// newPageInfo creates and returns a PageInfo and a next func. If an iterator can -// support paging, its iterator-creating method should call this. Each time the -// iterator's Next is called, it should call the returned next fn to determine -// whether a next item exists, and if so it should pop an item from the buffer. -// -// The fetch, bufLen and takeBuf arguments provide access to the iterator's -// internal slice of buffered items. They behave as described in PageInfo, above. -// -// The return value is the PageInfo.next method bound to the returned PageInfo value. -// (Returning it avoids exporting PageInfo.next.) -// -// Note: the returned PageInfo and next fn do not remove items from the buffer. -// It is up to the iterator using these to remove items from the buffer: -// typically by performing a pop in its Next. If items are not removed from the -// buffer, memory may grow unbounded. -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { - pi = &PageInfo{ - fetch: fetch, - bufLen: bufLen, - takeBuf: takeBuf, - } - return pi, pi.next -} - -// Remaining returns the number of items available before the iterator makes another API call. -func (pi *PageInfo) Remaining() int { return pi.bufLen() } - -// next provides support for an iterator's Next function. An iterator's Next -// should return the error returned by next if non-nil; else it can assume -// there is at least one item in its buffer, and it should return that item and -// remove it from the buffer. -func (pi *PageInfo) next() error { - pi.nextCalled = true - if pi.err != nil { // Once we get an error, always return it. - // TODO(jba): fix so users can retry on transient errors? Probably not worth it. - return pi.err - } - if pi.nextPageCalled { - pi.err = errMixed - return pi.err - } - // Loop until we get some items or reach the end. - for pi.bufLen() == 0 && !pi.atEnd { - if err := pi.fill(pi.MaxSize); err != nil { - pi.err = err - return pi.err - } - if pi.Token == "" { - pi.atEnd = true - } - } - // Either the buffer is non-empty or pi.atEnd is true (or both). - if pi.bufLen() == 0 { - // The buffer is empty and pi.atEnd is true, i.e. the service has no - // more items. - pi.err = Done - } - return pi.err -} - -// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the -// next-page token returned by the call. -// If fill returns a non-nil error, the buffer will be empty. -func (pi *PageInfo) fill(size int) error { - tok, err := pi.fetch(size, pi.Token) - if err != nil { - pi.takeBuf() // clear the buffer - return err - } - pi.Token = tok - return nil -} - -// Pageable is implemented by iterators that support paging. -type Pageable interface { - // PageInfo returns paging information associated with the iterator. - PageInfo() *PageInfo -} - -// Pager supports retrieving iterator items a page at a time. -type Pager struct { - pageInfo *PageInfo - pageSize int -} - -// NewPager returns a pager that uses iter. Calls to its NextPage method will -// obtain exactly pageSize items, unless fewer remain. The pageToken argument -// indicates where to start the iteration. Pass the empty string to start at -// the beginning, or pass a token retrieved from a call to Pager.NextPage. -// -// If you use an iterator with a Pager, you must not call Next on the iterator. 
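Because the PageInfo/Pager contract documented above is easy to misread (fetch fills the buffer, takeBuf must also clear it, and the empty token marks the end), a minimal sketch of how an iterator wires NewPageInfo and is then driven page by page may help; the stringIterator type and its index-as-token scheme are invented for illustration, not taken from this package:

package main

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// stringIterator is an in-memory iterator used only to illustrate the
// documented contract; generated clients wire NewPageInfo the same way.
type stringIterator struct {
	items    []string
	buf      []string
	pageInfo *iterator.PageInfo
}

func newStringIterator(items []string) *stringIterator {
	it := &stringIterator{items: items}
	it.pageInfo, _ = iterator.NewPageInfo(
		it.fetch,                          // fills the buffer one page at a time
		func() int { return len(it.buf) }, // bufLen
		func() interface{} { b := it.buf; it.buf = nil; return b }, // takeBuf clears the buffer
	)
	return it
}

// PageInfo makes stringIterator satisfy iterator.Pageable.
func (it *stringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// fetch appends up to pageSize items to the buffer and returns the next-page
// token, or "" when the iteration is exhausted. The token here is simply the
// next start index encoded as a string.
func (it *stringIterator) fetch(pageSize int, pageToken string) (string, error) {
	start := 0
	if pageToken != "" {
		fmt.Sscan(pageToken, &start)
	}
	end := start + pageSize
	if pageSize <= 0 || end > len(it.items) {
		end = len(it.items)
	}
	it.buf = append(it.buf, it.items[start:end]...)
	if end == len(it.items) {
		return "", nil
	}
	return fmt.Sprint(end), nil
}

func main() {
	p := iterator.NewPager(newStringIterator([]string{"a", "b", "c", "d", "e"}), 2, "")
	for {
		var page []string // NextPage needs a pointer to a slice of the item type
		tok, err := p.NextPage(&page)
		if err != nil {
			panic(err)
		}
		fmt.Println(page)
		if tok == "" {
			break // empty token: no more pages
		}
	}
}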
-func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { - p := &Pager{ - pageInfo: iter.PageInfo(), - pageSize: pageSize, - } - p.pageInfo.Token = pageToken - if pageSize <= 0 { - p.pageInfo.err = errors.New("iterator: page size must be positive") - } - return p -} - -// NextPage retrieves a sequence of items from the iterator and appends them -// to slicep, which must be a pointer to a slice of the iterator's item type. -// Exactly p.pageSize items will be appended, unless fewer remain. -// -// The first return value is the page token to use for the next page of items. -// If empty, there are no more pages. Aside from checking for the end of the -// iteration, the returned page token is only needed if the iteration is to be -// resumed a later time, in another context (possibly another process). -// -// The second return value is non-nil if an error occurred. It will never be -// the special iterator sentinel value Done. To recognize the end of the -// iteration, compare nextPageToken to the empty string. -// -// It is possible for NextPage to return a single zero-length page along with -// an empty page token when there are no more items in the iteration. -func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { - p.pageInfo.nextPageCalled = true - if p.pageInfo.err != nil { - return "", p.pageInfo.err - } - if p.pageInfo.nextCalled { - p.pageInfo.err = errMixed - return "", p.pageInfo.err - } - if p.pageInfo.bufLen() > 0 { - return "", errors.New("must call NextPage with an empty buffer") - } - // The buffer must be empty here, so takeBuf is a no-op. We call it just to get - // the buffer's type. - wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) - if slicep == nil { - return "", errors.New("nil passed to Pager.NextPage") - } - vslicep := reflect.ValueOf(slicep) - if vslicep.Type() != wantSliceType { - return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) - } - for p.pageInfo.bufLen() < p.pageSize { - if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { - p.pageInfo.err = err - return "", p.pageInfo.err - } - if p.pageInfo.Token == "" { - break - } - } - e := vslicep.Elem() - e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) - return p.pageInfo.Token, nil -} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go deleted file mode 100644 index d06f918b0e..0000000000 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.Credentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.Credentials)(w) -} - -// WithCredentials returns a ClientOption that authenticates API calls. -func WithCredentials(creds *google.Credentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go deleted file mode 100644 index 0ce107a624..0000000000 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 Google LLC. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.DefaultCredentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.DefaultCredentials)(w) -} - -func WithCredentials(creds *google.DefaultCredentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go deleted file mode 100644 index 1fff22fd5d..0000000000 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internaloption contains options used internally by Google client code. -package internaloption - -import ( - "google.golang.org/api/internal" - "google.golang.org/api/option" -) - -type defaultEndpointOption string - -func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { - settings.DefaultEndpoint = string(o) -} - -// WithDefaultEndpoint is an option that indicates the default endpoint. -// -// It should only be used internally by generated clients. -// -// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. -func WithDefaultEndpoint(url string) option.ClientOption { - return defaultEndpointOption(url) -} - -type defaultMTLSEndpointOption string - -func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { - settings.DefaultMTLSEndpoint = string(o) -} - -// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. -// -// It should only be used internally by generated clients. -func WithDefaultMTLSEndpoint(url string) option.ClientOption { - return defaultMTLSEndpointOption(url) -} - -// SkipDialSettingsValidation bypasses validation on ClientOptions. -// -// It should only be used internally. -func SkipDialSettingsValidation() option.ClientOption { - return skipDialSettingsValidation{} -} - -type skipDialSettingsValidation struct{} - -func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { - settings.SkipValidation = true -} - -// EnableDirectPath returns a ClientOption that overrides the default -// attempt to use DirectPath. -// -// It should only be used internally by generated clients. -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func EnableDirectPath(dp bool) option.ClientOption { - return enableDirectPath(dp) -} - -type enableDirectPath bool - -func (e enableDirectPath) Apply(o *internal.DialSettings) { - o.EnableDirectPath = bool(e) -} - -// WithDefaultAudience returns a ClientOption that specifies a default audience -// to be used as the audience field ("aud") for the JWT token authentication. -// -// It should only be used internally by generated clients. -func WithDefaultAudience(audience string) option.ClientOption { - return withDefaultAudience(audience) -} - -type withDefaultAudience string - -func (w withDefaultAudience) Apply(o *internal.DialSettings) { - o.DefaultAudience = string(w) -} - -// WithDefaultScopes returns a ClientOption that overrides the default OAuth2 -// scopes to be used for a service. -// -// It should only be used internally by generated clients. 
-func WithDefaultScopes(scope ...string) option.ClientOption { - return withDefaultScopes(scope) -} - -type withDefaultScopes []string - -func (w withDefaultScopes) Apply(o *internal.DialSettings) { - o.DefaultScopes = make([]string, len(w)) - copy(o.DefaultScopes, w) -} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go deleted file mode 100644 index 686476f9cb..0000000000 --- a/vendor/google.golang.org/api/option/option.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2017 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package option contains options for Google API clients. -package option - -import ( - "crypto/tls" - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/api/internal" - "google.golang.org/api/internal/impersonate" - "google.golang.org/grpc" -) - -// A ClientOption is an option for a Google API client. -type ClientOption interface { - Apply(*internal.DialSettings) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. -func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Apply(o *internal.DialSettings) { - o.TokenSource = w.ts -} - -type withCredFile string - -func (w withCredFile) Apply(o *internal.DialSettings) { - o.CredentialsFile = string(w) -} - -// WithCredentialsFile returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials file. -func WithCredentialsFile(filename string) ClientOption { - return withCredFile(filename) -} - -// WithServiceAccountFile returns a ClientOption that uses a Google service -// account credentials file to authenticate. -// -// Deprecated: Use WithCredentialsFile instead. -func WithServiceAccountFile(filename string) ClientOption { - return WithCredentialsFile(filename) -} - -// WithCredentialsJSON returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials. -func WithCredentialsJSON(p []byte) ClientOption { - return withCredentialsJSON(p) -} - -type withCredentialsJSON []byte - -func (w withCredentialsJSON) Apply(o *internal.DialSettings) { - o.CredentialsJSON = make([]byte, len(w)) - copy(o.CredentialsJSON, w) -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. -func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Apply(o *internal.DialSettings) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. -func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Apply(o *internal.DialSettings) { - o.Scopes = make([]string, len(w)) - copy(o.Scopes, w) -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. -func WithUserAgent(ua string) ClientOption { - return withUA(ua) -} - -type withUA string - -func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } - -// WithHTTPClient returns a ClientOption that specifies the HTTP client to use -// as the basis of communications. 
This option may only be used with services -// that support HTTP as their communication transport. When used, the -// WithHTTPClient option takes precedent over all other supplied options. -func WithHTTPClient(client *http.Client) ClientOption { - return withHTTPClient{client} -} - -type withHTTPClient struct{ client *http.Client } - -func (w withHTTPClient) Apply(o *internal.DialSettings) { - o.HTTPClient = w.client -} - -// WithGRPCConn returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option may only be -// used with services that support gRPC as their communication transport. When -// used, the WithGRPCConn option takes precedent over all other supplied -// options. -func WithGRPCConn(conn *grpc.ClientConn) ClientOption { - return withGRPCConn{conn} -} - -type withGRPCConn struct{ conn *grpc.ClientConn } - -func (w withGRPCConn) Apply(o *internal.DialSettings) { - o.GRPCConn = w.conn -} - -// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption -// to an underlying gRPC dial. It does not work with WithGRPCConn. -func WithGRPCDialOption(opt grpc.DialOption) ClientOption { - return withGRPCDialOption{opt} -} - -type withGRPCDialOption struct{ opt grpc.DialOption } - -func (w withGRPCDialOption) Apply(o *internal.DialSettings) { - o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) -} - -// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC -// connections that requests will be balanced between. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func WithGRPCConnectionPool(size int) ClientOption { - return withGRPCConnectionPool(size) -} - -type withGRPCConnectionPool int - -func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - o.GRPCConnPoolSize = int(w) -} - -// WithAPIKey returns a ClientOption that specifies an API key to be used -// as the basis for authentication. -// -// API Keys can only be used for JSON-over-HTTP APIs, including those under -// the import path google.golang.org/api/.... -func WithAPIKey(apiKey string) ClientOption { - return withAPIKey(apiKey) -} - -type withAPIKey string - -func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } - -// WithAudiences returns a ClientOption that specifies an audience to be used -// as the audience field ("aud") for the JWT token authentication. -func WithAudiences(audience ...string) ClientOption { - return withAudiences(audience) -} - -type withAudiences []string - -func (w withAudiences) Apply(o *internal.DialSettings) { - o.Audiences = make([]string, len(w)) - copy(o.Audiences, w) -} - -// WithoutAuthentication returns a ClientOption that specifies that no -// authentication should be used. It is suitable only for testing and for -// accessing public resources, like public Google Cloud Storage buckets. -// It is an error to provide both WithoutAuthentication and any of WithAPIKey, -// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. -func WithoutAuthentication() ClientOption { - return withoutAuthentication{} -} - -type withoutAuthentication struct{} - -func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } - -// WithQuotaProject returns a ClientOption that specifies the project used -// for quota and billing purposes. 
-// -// For more information please read: -// https://cloud.google.com/apis/docs/system-parameters -func WithQuotaProject(quotaProject string) ClientOption { - return withQuotaProject(quotaProject) -} - -type withQuotaProject string - -func (w withQuotaProject) Apply(o *internal.DialSettings) { - o.QuotaProject = string(w) -} - -// WithRequestReason returns a ClientOption that specifies a reason for -// making the request, which is intended to be recorded in audit logging. -// An example reason would be a support-case ticket number. -// -// For more information please read: -// https://cloud.google.com/apis/docs/system-parameters -func WithRequestReason(requestReason string) ClientOption { - return withRequestReason(requestReason) -} - -type withRequestReason string - -func (w withRequestReason) Apply(o *internal.DialSettings) { - o.RequestReason = string(w) -} - -// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) -// settings on gRPC and HTTP clients. -// An example reason would be to bind custom telemetry that overrides the defaults. -func WithTelemetryDisabled() ClientOption { - return withTelemetryDisabled{} -} - -type withTelemetryDisabled struct{} - -func (w withTelemetryDisabled) Apply(o *internal.DialSettings) { - o.TelemetryDisabled = true -} - -// ClientCertSource is a function that returns a TLS client certificate to be used -// when opening TLS connections. -// -// It follows the same semantics as crypto/tls.Config.GetClientCertificate. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. -type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - -// WithClientCertSource returns a ClientOption that specifies a -// callback function for obtaining a TLS client certificate. -// -// This option is used for supporting mTLS authentication, where the -// server validates the client certifcate when establishing a connection. -// -// The callback function will be invoked whenever the server requests a -// certificate from the client. Implementations of the callback function -// should try to ensure that a valid certificate can be repeatedly returned -// on demand for the entire life cycle of the transport client. If a nil -// Certificate is returned (i.e. no Certificate can be obtained), an error -// should be returned. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func WithClientCertSource(s ClientCertSource) ClientOption { - return withClientCertSource{s} -} - -type withClientCertSource struct{ s ClientCertSource } - -func (w withClientCertSource) Apply(o *internal.DialSettings) { - o.ClientCertSource = w.s -} - -// ImpersonateCredentials returns a ClientOption that will impersonate the -// target service account. -// -// In order to impersonate the target service account -// the base service account must have the Service Account Token Creator role, -// roles/iam.serviceAccountTokenCreator, on the target service account. -// See https://cloud.google.com/iam/docs/understanding-service-accounts. -// -// Optionally, delegates can be used during impersonation if the base service -// account lacks the token creator role on the target. When using delegates, -// each service account must be granted roles/iam.serviceAccountTokenCreator -// on the next service account in the chain. 
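For orientation while reading the removed option.go: each ClientOption is passed variadically to a client constructor and mutates an internal.DialSettings via Apply. The sketch below shows the composition pattern, including ImpersonateCredentials together with an explicit WithScopes as the comment above requires (the worked SA1/DSA1/DSA2 delegate example continues just below). The newClient constructor, file paths, and account names are placeholders invented for this sketch, not APIs or identities from this repository.

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
)

// newClient stands in for any NewService/NewClient constructor that accepts
// option.ClientOption values; it exists only to keep this sketch self-contained.
func newClient(ctx context.Context, opts ...option.ClientOption) error {
	log.Printf("constructing client with %d options", len(opts))
	return nil
}

func main() {
	ctx := context.Background()

	// Plain credential and metadata options stack left to right.
	if err := newClient(ctx,
		option.WithCredentialsFile("/path/to/service-account.json"),
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
		option.WithUserAgent("example-app/0.1"),
		option.WithQuotaProject("example-billing-project"),
	); err != nil {
		log.Fatal(err)
	}

	// Impersonation: the base credential (here ADC) must hold
	// roles/iam.serviceAccountTokenCreator on the first delegate, each delegate
	// on the next, and the last delegate on the target. Scopes are required.
	if err := newClient(ctx,
		option.ImpersonateCredentials(
			"target-sa@example-project.iam.gserviceaccount.com",
			"delegate-sa@example-project.iam.gserviceaccount.com",
		),
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	); err != nil {
		log.Fatal(err)
	}
}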
-// -// For example, if a base service account of SA1 is trying to impersonate target -// service account SA2 while using delegate service accounts DSA1 and DSA2, -// the following must be true: -// -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. -// -// The resulting impersonated credential will either have the default scopes of -// the client being instantiating or the scopes from WithScopes if provided. -// Scopes are required for creating impersonated credentials, so if this option -// is used while not using a NewClient/NewService function, WithScopes must also -// be explicitly passed in as well. -// -// If the base credential is an authorized user and not a service account, or if -// the option WithQuotaProject is set, the target service account must have a -// role that grants the serviceusage.services.use permission such as -// roles/serviceusage.serviceUsageConsumer. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func ImpersonateCredentials(target string, delegates ...string) ClientOption { - return impersonateServiceAccount{ - target: target, - delegates: delegates, - } -} - -type impersonateServiceAccount struct { - target string - delegates []string -} - -func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { - o.ImpersonationConfig = &impersonate.Config{ - Target: i.target, - } - o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) - copy(o.ImpersonationConfig.Delegates, i.delegates) -} diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go deleted file mode 100644 index 141ae45793..0000000000 --- a/vendor/google.golang.org/api/transport/cert/default_cert.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cert contains certificate tools for Google API clients. -// This package is intended to be used with crypto/tls.Config.GetClientCertificate. -// -// The certificates can be used to satisfy Google's Endpoint Validation. -// See https://cloud.google.com/endpoint-verification/docs/overview -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package cert - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "os/user" - "path/filepath" - "sync" - "time" -) - -const ( - metadataPath = ".secureConnect" - metadataFile = "context_aware_metadata.json" -) - -// defaultCertData holds all the variables pertaining to -// the default certficate source created by DefaultSource. -type defaultCertData struct { - once sync.Once - source Source - err error - cachedCertMutex sync.Mutex - cachedCert *tls.Certificate -} - -var ( - defaultCert defaultCertData -) - -// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. -type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - -// DefaultSource returns a certificate source that execs the command specified -// in the file at ~/.secureConnect/context_aware_metadata.json -// -// If that file does not exist, a nil source is returned. 
-func DefaultSource() (Source, error) { - defaultCert.once.Do(func() { - defaultCert.source, defaultCert.err = newSecureConnectSource() - }) - return defaultCert.source, defaultCert.err -} - -type secureConnectSource struct { - metadata secureConnectMetadata -} - -type secureConnectMetadata struct { - Cmd []string `json:"cert_provider_command"` -} - -// newSecureConnectSource creates a secureConnectSource by reading the well-known file. -func newSecureConnectSource() (Source, error) { - user, err := user.Current() - if err != nil { - // Ignore. - return nil, nil - } - filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) - file, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - // Ignore. - return nil, nil - } - if err != nil { - return nil, err - } - - var metadata secureConnectMetadata - if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) - } - if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) - } - return (&secureConnectSource{ - metadata: metadata, - }).getClientCertificate, nil -} - -func validateMetadata(metadata secureConnectMetadata) error { - if len(metadata.Cmd) == 0 { - return errors.New("empty cert_provider_command") - } - return nil -} - -func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - defaultCert.cachedCertMutex.Lock() - defer defaultCert.cachedCertMutex.Unlock() - if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) { - return defaultCert.cachedCert, nil - } - command := s.metadata.Cmd - data, err := exec.Command(command[0], command[1:]...).Output() - if err != nil { - // TODO(cbro): read stderr for error message? Might contain sensitive info. - return nil, err - } - cert, err := tls.X509KeyPair(data, data) - if err != nil { - return nil, err - } - defaultCert.cachedCert = &cert - return &cert, nil -} - -// isCertificateExpired returns true if the given cert is expired or invalid. -func isCertificateExpired(cert *tls.Certificate) bool { - if len(cert.Certificate) == 0 { - return true - } - parsed, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return true - } - return time.Now().After(parsed.NotAfter) -} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go deleted file mode 100644 index f8a6ca2998..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2015 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package grpc supports network connections to GRPC servers. -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package grpc - -import ( - "context" - "crypto/tls" - "errors" - "log" - "strings" - - "go.opencensus.io/plugin/ocgrpc" - "golang.org/x/oauth2" - "google.golang.org/api/internal" - "google.golang.org/api/option" - "google.golang.org/api/transport/internal/dca" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - grpcgoogle "google.golang.org/grpc/credentials/google" - "google.golang.org/grpc/credentials/oauth" - - // Install grpclb, which is required for direct path. - _ "google.golang.org/grpc/balancer/grpclb" -) - -// Set at init time by dial_appengine.go. 
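Stepping back briefly to transport/cert/default_cert.go, removed just above: DefaultSource plugs directly into crypto/tls, and the SecureConnect metadata it reads is a small JSON file whose only required key is cert_provider_command. A minimal sketch of the wiring follows; the provider binary path is purely illustrative, and the package itself notes it is normally driven through option.WithClientCertSource rather than called directly.

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/api/transport/cert"
)

func main() {
	// ~/.secureConnect/context_aware_metadata.json is expected to look like:
	//   {"cert_provider_command": ["/usr/local/bin/example_cert_provider"]}
	// (the provider path is a placeholder). If the file is absent, src is nil.
	src, err := cert.DefaultSource()
	if err != nil {
		log.Fatalf("reading SecureConnect metadata: %v", err)
	}
	if src == nil {
		log.Println("no SecureConnect metadata; continuing without a client certificate")
		return
	}

	// cert.Source has the same shape as tls.Config.GetClientCertificate, so it
	// can be assigned directly; the returned certificate is cached until expiry.
	cfg := &tls.Config{GetClientCertificate: src}
	_ = cfg // hand cfg to an http.Transport or to gRPC transport credentials
}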
If nil, we're not on App Engine. -var appengineDialerHook func(context.Context) grpc.DialOption - -// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. -var timeoutDialerOption grpc.DialOption - -// Dial returns a GRPC connection for use communicating with a Google cloud -// service, configured with the given ClientOptions. -func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - o, err := processAndValidateOpts(opts) - if err != nil { - return nil, err - } - if o.GRPCConnPool != nil { - return o.GRPCConnPool.Conn(), nil - } - // NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize) - // on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it. - // - // Connection pooling is only done via DialPool. - return dial(ctx, false, o) -} - -// DialInsecure returns an insecure GRPC connection for use communicating -// with fake or mock Google cloud service implementations, such as emulators. -// The connection is configured with the given ClientOptions. -func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - o, err := processAndValidateOpts(opts) - if err != nil { - return nil, err - } - return dial(ctx, true, o) -} - -// DialPool returns a pool of GRPC connections for the given service. -// This differs from the connection pooling implementation used by Dial, which uses a custom GRPC load balancer. -// DialPool should be used instead of Dial when a pool is used by default or a different custom GRPC load balancer is needed. -// The context and options are shared between each Conn in the pool. -// The pool size is configured using the WithGRPCConnectionPool option. -// -// This API is subject to change as we further refine requirements. It will go away if gRPC stubs accept an interface instead of the concrete ClientConn type. See https://github.com/grpc/grpc-go/issues/1287. -func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error) { - o, err := processAndValidateOpts(opts) - if err != nil { - return nil, err - } - if o.GRPCConnPool != nil { - return o.GRPCConnPool, nil - } - poolSize := o.GRPCConnPoolSize - if o.GRPCConn != nil { - // WithGRPCConn is technically incompatible with WithGRPCConnectionPool. - // Always assume pool size is 1 when a grpc.ClientConn is explicitly used. - poolSize = 1 - } - o.GRPCConnPoolSize = 0 // we don't *need* to set this to zero, but it's safe to. - - if poolSize == 0 || poolSize == 1 { - // Fast path for common case for a connection pool with a single connection. - conn, err := dial(ctx, false, o) - if err != nil { - return nil, err - } - return &singleConnPool{conn}, nil - } - - pool := &roundRobinConnPool{} - for i := 0; i < poolSize; i++ { - conn, err := dial(ctx, false, o) - if err != nil { - defer pool.Close() // NOTE: error from Close is ignored. 
- return nil, err - } - pool.conns = append(pool.conns, conn) - } - return pool, nil -} - -func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { - if o.HTTPClient != nil { - return nil, errors.New("unsupported HTTP client specified") - } - if o.GRPCConn != nil { - return o.GRPCConn, nil - } - clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(o) - if err != nil { - return nil, err - } - var grpcOpts []grpc.DialOption - if insecure { - grpcOpts = []grpc.DialOption{grpc.WithInsecure()} - } else if !o.NoAuth { - if o.APIKey != "" { - log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") - } - creds, err := internal.Creds(ctx, o) - if err != nil { - return nil, err - } - - if o.QuotaProject == "" { - o.QuotaProject = internal.QuotaProjectFromCreds(creds) - } - - // Attempt Direct Path only if: - // * The endpoint is a host:port (or dns:///host:port). - // * Credentials are obtained via GCE metadata server, using the default - // service account. - if o.EnableDirectPath && checkDirectPathEndPoint(endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { - if !strings.HasPrefix(endpoint, "dns:///") { - endpoint = "dns:///" + endpoint - } - grpcOpts = []grpc.DialOption{ - grpc.WithCredentialsBundle( - grpcgoogle.NewComputeEngineCredentials(), - ), - // For now all DirectPath go clients will be using the following lb config, but in future - // when different services need different configs, then we should change this to a - // per-service config. - grpc.WithDisableServiceConfig(), - grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`), - } - // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. - } else { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - } - } - } - - if appengineDialerHook != nil { - // Use the Socket API on App Engine. - // appengine dialer will override socketopt dialer - grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) - } - - // Add tracing, but before the other options, so that clients can override the - // gRPC stats handler. - // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, o) - grpcOpts = append(grpcOpts, o.GRPCDialOpts...) - if o.UserAgent != "" { - grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) - } - - // TODO(weiranf): This socketopt dialer will be used by default at some - // point when isDirectPathEnabled will default to true, we guard it by - // the Directpath env var for now once we can introspect user defined - // dialer (https://github.com/grpc/grpc-go/issues/2795). - if timeoutDialerOption != nil && o.EnableDirectPath && checkDirectPathEndPoint(endpoint) { - grpcOpts = append(grpcOpts, timeoutDialerOption) - } - - return grpc.DialContext(ctx, endpoint, grpcOpts...) 
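The pooling path above is easiest to see from the caller's side. A minimal sketch, assuming a placeholder endpoint and that Application Default Credentials are available at runtime:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
)

func main() {
	ctx := context.Background()

	// WithGRPCConnectionPool(4) makes DialPool dial four ClientConns and return
	// a round-robin pool; with a pool size of 0 or 1 it returns a single conn.
	pool, err := gtransport.DialPool(ctx,
		option.WithEndpoint("example.googleapis.com:443"), // placeholder endpoint
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
		option.WithGRPCConnectionPool(4),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	log.Printf("pool of %d connections", pool.Num())
	conn := pool.Conn() // successive calls rotate across the underlying conns
	_ = conn
}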
-} - -func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { - if settings.TelemetryDisabled { - return opts - } - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - -// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. -type grpcTokenSource struct { - oauth.TokenSource - - // Additional metadata attached as headers. - quotaProject string - requestReason string -} - -// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. -func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( - map[string]string, error) { - metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) - if err != nil { - return nil, err - } - - // Attach system parameter - if ts.quotaProject != "" { - metadata["X-goog-user-project"] = ts.quotaProject - } - if ts.requestReason != "" { - metadata["X-goog-request-reason"] = ts.requestReason - } - return metadata, nil -} - -func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool { - if ts == nil { - return false - } - tok, err := ts.Token() - if err != nil { - return false - } - if tok == nil { - return false - } - if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { - return false - } - if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { - return false - } - return true -} - -func checkDirectPathEndPoint(endpoint string) bool { - // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). - // Also don't try direct path if the user has chosen an alternate name resolver - // (i.e., via ":///" prefix). - // - // TODO(cbro): once gRPC has introspectible options, check the user hasn't - // provided a custom dialer in gRPC options. - if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { - return false - } - - if endpoint == "" { - return false - } - - return true -} - -func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if err := o.Validate(); err != nil { - return nil, err - } - - return &o, nil -} - -type connPoolOption struct{ ConnPool } - -// WithConnPool returns a ClientOption that specifies the ConnPool -// connection to use as the basis of communications. -// -// This is only to be used by Google client libraries internally, for example -// when creating a longrunning API client that shares the same connection pool -// as a service client. -func WithConnPool(p ConnPool) option.ClientOption { - return connPoolOption{p} -} - -func (o connPoolOption) Apply(s *internal.DialSettings) { - s.GRPCConnPool = o.ConnPool -} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go deleted file mode 100644 index 2c6aef2264..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package grpc - -import ( - "context" - "net" - "time" - - "google.golang.org/appengine" - "google.golang.org/appengine/socket" - "google.golang.org/grpc" -) - -func init() { - // NOTE: dev_appserver doesn't currently support SSL. - // When it does, this code can be removed. 
- if appengine.IsDevAppServer() { - return - } - - appengineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go deleted file mode 100644 index 0e4f388968..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11,linux - -package grpc - -import ( - "context" - "net" - "syscall" - - "golang.org/x/sys/unix" - "google.golang.org/grpc" -) - -const ( - // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By - // default is 20 seconds. - tcpUserTimeoutMilliseconds = 20000 -) - -func init() { - // timeoutDialerOption is a grpc.DialOption that contains dialer with - // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. - timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) -} - -func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { - control := func(network, address string, c syscall.RawConn) error { - var syscallErr error - controlErr := c.Control(func(fd uintptr) { - syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) - }) - if syscallErr != nil { - return syscallErr - } - if controlErr != nil { - return controlErr - } - return nil - } - d := &net.Dialer{ - Control: control, - } - return d.DialContext(ctx, "tcp", addr) -} diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go deleted file mode 100644 index 4cf94a2771..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/pool.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package grpc - -import ( - "context" - "fmt" - "sync/atomic" - - "google.golang.org/api/internal" - "google.golang.org/grpc" -) - -// ConnPool is a pool of grpc.ClientConns. -type ConnPool = internal.ConnPool // NOTE(cbro): type alias to export the type. It must live in internal to avoid a circular dependency. - -var _ ConnPool = &roundRobinConnPool{} -var _ ConnPool = &singleConnPool{} - -// singleConnPool is a special case for a single connection. 
-type singleConnPool struct { - *grpc.ClientConn -} - -func (p *singleConnPool) Conn() *grpc.ClientConn { return p.ClientConn } -func (p *singleConnPool) Num() int { return 1 } - -type roundRobinConnPool struct { - conns []*grpc.ClientConn - - idx uint32 // access via sync/atomic -} - -func (p *roundRobinConnPool) Num() int { - return len(p.conns) -} - -func (p *roundRobinConnPool) Conn() *grpc.ClientConn { - i := atomic.AddUint32(&p.idx, 1) - return p.conns[i%uint32(len(p.conns))] -} - -func (p *roundRobinConnPool) Close() error { - var errs multiError - for _, conn := range p.conns { - if err := conn.Close(); err != nil { - errs = append(errs, err) - } - } - if len(errs) == 0 { - return nil - } - return errs -} - -func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { - return p.Conn().Invoke(ctx, method, args, reply, opts...) -} - -func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - return p.Conn().NewStream(ctx, desc, method, opts...) -} - -// multiError represents errors from multiple conns in the group. -// -// TODO: figure out how and whether this is useful to export. End users should -// not be depending on the transport/grpc package directly, so there might need -// to be some service-specific multi-error type. -type multiError []error - -func (m multiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go deleted file mode 100644 index b3be7e4e3a..0000000000 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dca contains utils for implementing Device Certificate -// Authentication according to https://google.aip.dev/auth/4114 -// -// The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. -// -// Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. -// -// We would like to avoid introducing client-side logic that parses whether the -// endpoint override is an mTLS url, since the url pattern may change at anytime. -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. 
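The four numbered rules in the dca package comment above reduce to one small decision: an explicit endpoint override always wins, otherwise the mTLS endpoint is chosen when a client certificate is available or the mode is forced. The stand-alone paraphrase below captures only that choice; it deliberately ignores the host:port merge that getEndpoint performs for bare overrides (shown further down), and the endpoint strings are illustrative only.

package main

import "fmt"

// chooseEndpoint paraphrases the selection logic described in the dca package
// comment: explicit override first, then the mTLS endpoint if a client
// certificate is available (or GOOGLE_API_USE_MTLS_ENDPOINT=always), else the
// regular default endpoint.
func chooseEndpoint(override, defaultEndpoint, mtlsEndpoint, mtlsMode string, haveClientCert bool) string {
	if override != "" {
		return override // bare host:port overrides are merged by getEndpoint below
	}
	if mtlsMode == "always" || (haveClientCert && mtlsMode == "auto") {
		return mtlsEndpoint
	}
	return defaultEndpoint
}

func main() {
	fmt.Println(chooseEndpoint("", "example.googleapis.com:443", "example.mtls.googleapis.com:443", "auto", true))
	// example.mtls.googleapis.com:443
	fmt.Println(chooseEndpoint("", "example.googleapis.com:443", "example.mtls.googleapis.com:443", "auto", false))
	// example.googleapis.com:443
}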
-package dca - -import ( - "net/url" - "os" - "strings" - - "google.golang.org/api/internal" - "google.golang.org/api/transport/cert" -) - -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" -) - -// GetClientCertificateSourceAndEndpoint is a convenience function that invokes -// getClientCertificateSource and getEndpoint sequentially and returns the client -// cert source and endpoint as a tuple. -func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) - if err != nil { - return nil, "", err - } - return clientCertSource, endpoint, nil -} - -// getClientCertificateSource returns a default client certificate source, if -// not provided by the user. -// -// A nil default source can be returned if the source does not exist. Any exceptions -// encountered while initializing the default source will be reported as client -// error (ex. corrupt metadata file). -// -// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE -// must be set to "true" to allow certificate to be used (including user provided -// certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { - if !isClientCertificateEnabled() { - return nil, nil - } else if settings.HTTPClient != nil { - return nil, nil // HTTPClient is incompatible with ClientCertificateSource - } else if settings.ClientCertSource != nil { - return settings.ClientCertSource, nil - } else { - return cert.DefaultSource() - } -} - -func isClientCertificateEnabled() bool { - useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") - // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. - return strings.ToLower(useClientCert) == "true" -} - -// getEndpoint returns the endpoint for the service, taking into account the -// user-provided endpoint override "settings.Endpoint". -// -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. -// -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. -// -// If the endpoint override is an address (host:port) rather than full base -// URL (ex. https://...), then the user-provided address will be merged into -// the default endpoint. For example, WithEndpoint("myhost:8000") and -// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { - if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - return settings.DefaultMTLSEndpoint, nil - } - return settings.DefaultEndpoint, nil - } - if strings.Contains(settings.Endpoint, "://") { - // User passed in a full URL path, use it verbatim. - return settings.Endpoint, nil - } - if settings.DefaultEndpoint == "" { - // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. - // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. 
- return settings.Endpoint, nil - } - - // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) -} - -func getMTLSMode() string { - mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") - if mode == "" { - mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. - } - if mode == "" { - return mTLSModeAuto - } - return strings.ToLower(mode) -} - -func mergeEndpoints(baseURL, newHost string) (string, error) { - u, err := url.Parse(fixScheme(baseURL)) - if err != nil { - return "", err - } - return strings.Replace(baseURL, u.Host, newHost, 1), nil -} - -func fixScheme(baseURL string) string { - if !strings.Contains(baseURL, "://") { - return "https://" + baseURL - } - return baseURL -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go deleted file mode 100644 index 4ec872e460..0000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +++ /dev/null @@ -1,2822 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/socket/socket_service.proto - -package socket - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RemoteSocketServiceError_ErrorCode int32 - -const ( - RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 - RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 - RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 - RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 - RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 - RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 -) - -var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ - 1: "SYSTEM_ERROR", - 2: "GAI_ERROR", - 4: "FAILURE", - 5: "PERMISSION_DENIED", - 6: "INVALID_REQUEST", - 7: "SOCKET_CLOSED", -} -var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ - "SYSTEM_ERROR": 1, - "GAI_ERROR": 2, - "FAILURE": 4, - "PERMISSION_DENIED": 5, - "INVALID_REQUEST": 6, - "SOCKET_CLOSED": 7, -} - -func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { - p := new(RemoteSocketServiceError_ErrorCode) - *p = x - return p -} -func (x RemoteSocketServiceError_ErrorCode) String() string { - return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) -} -func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") - if err != nil { - return err - } - *x = RemoteSocketServiceError_ErrorCode(value) - return nil -} -func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} -} - -type RemoteSocketServiceError_SystemError int32 - 
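Returning to the dca.go helpers removed just above, before the generated socket_service.pb.go deletion begins: mergeEndpoints keeps the default endpoint's scheme and path and swaps in only the user-supplied host. The sketch below repeats the two helpers so the worked example from the getEndpoint comment can be checked; merging "myhost:8000" into "https://foo.com/bar/baz" yields "https://myhost:8000/bar/baz", preserving the caller's port (the ":8080" in the removed comment appears to be a typo).

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// fixScheme and mergeEndpoints mirror the helpers removed above in dca.go.
func fixScheme(baseURL string) string {
	if !strings.Contains(baseURL, "://") {
		return "https://" + baseURL
	}
	return baseURL
}

func mergeEndpoints(baseURL, newHost string) (string, error) {
	u, err := url.Parse(fixScheme(baseURL))
	if err != nil {
		return "", err
	}
	return strings.Replace(baseURL, u.Host, newHost, 1), nil
}

func main() {
	merged, err := mergeEndpoints("https://foo.com/bar/baz", "myhost:8000")
	if err != nil {
		panic(err)
	}
	fmt.Println(merged) // https://myhost:8000/bar/baz
}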
-const ( - RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 - RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 - RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 - RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 - RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 - RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 - RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 - RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 - RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 - RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 - RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 - RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 - RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 - RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 - RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 - RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 - RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 - RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 - RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 - RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 - RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 - RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 - RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 - RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 - RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 - RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 - RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 - RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 - RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 - RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 - RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 - RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 - RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 - RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 - RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 - RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 - RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 - RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 - RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 - RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 - 
RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 - RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 - RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 - RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 - RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 - RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 - RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 - RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 - RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 - RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 - RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 - RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 - RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 - RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 - RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 - RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 - RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 - RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 - RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 - RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 - RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 - RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 - RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 - RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 - RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 - RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 - RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 - RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 - RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 - RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 - RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 - RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 - RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 - RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 - RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 - RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 - RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 - RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 - RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 - RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 - RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 - RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 - RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 - RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 - 
RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 - RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 - RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 - RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 - RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 - RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 - RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 - RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 - RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 - RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 - RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 - RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 - RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 - RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 - RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 - RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 - RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 - RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 - RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 - RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 - RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 - RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 - RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 - RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 - RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 - RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 - RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 - RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 - RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 - RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 - RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 - RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 - RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 - RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 - RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 - RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 - RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 - RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 - RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 - RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 - 
RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 - RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 - RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 - RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 -) - -var RemoteSocketServiceError_SystemError_name = map[int32]string{ - 0: "SYS_SUCCESS", - 1: "SYS_EPERM", - 2: "SYS_ENOENT", - 3: "SYS_ESRCH", - 4: "SYS_EINTR", - 5: "SYS_EIO", - 6: "SYS_ENXIO", - 7: "SYS_E2BIG", - 8: "SYS_ENOEXEC", - 9: "SYS_EBADF", - 10: "SYS_ECHILD", - 11: "SYS_EAGAIN", - // Duplicate value: 11: "SYS_EWOULDBLOCK", - 12: "SYS_ENOMEM", - 13: "SYS_EACCES", - 14: "SYS_EFAULT", - 15: "SYS_ENOTBLK", - 16: "SYS_EBUSY", - 17: "SYS_EEXIST", - 18: "SYS_EXDEV", - 19: "SYS_ENODEV", - 20: "SYS_ENOTDIR", - 21: "SYS_EISDIR", - 22: "SYS_EINVAL", - 23: "SYS_ENFILE", - 24: "SYS_EMFILE", - 25: "SYS_ENOTTY", - 26: "SYS_ETXTBSY", - 27: "SYS_EFBIG", - 28: "SYS_ENOSPC", - 29: "SYS_ESPIPE", - 30: "SYS_EROFS", - 31: "SYS_EMLINK", - 32: "SYS_EPIPE", - 33: "SYS_EDOM", - 34: "SYS_ERANGE", - 35: "SYS_EDEADLK", - // Duplicate value: 35: "SYS_EDEADLOCK", - 36: "SYS_ENAMETOOLONG", - 37: "SYS_ENOLCK", - 38: "SYS_ENOSYS", - 39: "SYS_ENOTEMPTY", - 40: "SYS_ELOOP", - 42: "SYS_ENOMSG", - 43: "SYS_EIDRM", - 44: "SYS_ECHRNG", - 45: "SYS_EL2NSYNC", - 46: "SYS_EL3HLT", - 47: "SYS_EL3RST", - 48: "SYS_ELNRNG", - 49: "SYS_EUNATCH", - 50: "SYS_ENOCSI", - 51: "SYS_EL2HLT", - 52: "SYS_EBADE", - 53: "SYS_EBADR", - 54: "SYS_EXFULL", - 55: "SYS_ENOANO", - 56: "SYS_EBADRQC", - 57: "SYS_EBADSLT", - 59: "SYS_EBFONT", - 60: "SYS_ENOSTR", - 61: "SYS_ENODATA", - 62: "SYS_ETIME", - 63: "SYS_ENOSR", - 64: "SYS_ENONET", - 65: "SYS_ENOPKG", - 66: "SYS_EREMOTE", - 67: "SYS_ENOLINK", - 68: "SYS_EADV", - 69: "SYS_ESRMNT", - 70: "SYS_ECOMM", - 71: "SYS_EPROTO", - 72: "SYS_EMULTIHOP", - 73: "SYS_EDOTDOT", - 74: "SYS_EBADMSG", - 75: "SYS_EOVERFLOW", - 76: "SYS_ENOTUNIQ", - 77: "SYS_EBADFD", - 78: "SYS_EREMCHG", - 79: "SYS_ELIBACC", - 80: "SYS_ELIBBAD", - 81: "SYS_ELIBSCN", - 82: "SYS_ELIBMAX", - 83: "SYS_ELIBEXEC", - 84: "SYS_EILSEQ", - 85: "SYS_ERESTART", - 86: "SYS_ESTRPIPE", - 87: "SYS_EUSERS", - 88: "SYS_ENOTSOCK", - 89: "SYS_EDESTADDRREQ", - 90: "SYS_EMSGSIZE", - 91: "SYS_EPROTOTYPE", - 92: "SYS_ENOPROTOOPT", - 93: "SYS_EPROTONOSUPPORT", - 94: "SYS_ESOCKTNOSUPPORT", - 95: "SYS_EOPNOTSUPP", - // Duplicate value: 95: "SYS_ENOTSUP", - 96: "SYS_EPFNOSUPPORT", - 97: "SYS_EAFNOSUPPORT", - 98: "SYS_EADDRINUSE", - 99: "SYS_EADDRNOTAVAIL", - 100: "SYS_ENETDOWN", - 101: "SYS_ENETUNREACH", - 102: "SYS_ENETRESET", - 103: "SYS_ECONNABORTED", - 104: "SYS_ECONNRESET", - 105: "SYS_ENOBUFS", - 106: "SYS_EISCONN", - 107: "SYS_ENOTCONN", - 108: "SYS_ESHUTDOWN", - 109: "SYS_ETOOMANYREFS", - 110: "SYS_ETIMEDOUT", - 111: "SYS_ECONNREFUSED", - 112: "SYS_EHOSTDOWN", - 113: "SYS_EHOSTUNREACH", - 114: "SYS_EALREADY", - 115: "SYS_EINPROGRESS", - 116: "SYS_ESTALE", - 117: "SYS_EUCLEAN", - 118: "SYS_ENOTNAM", - 119: "SYS_ENAVAIL", - 120: "SYS_EISNAM", - 121: "SYS_EREMOTEIO", - 122: "SYS_EDQUOT", - 123: "SYS_ENOMEDIUM", - 124: "SYS_EMEDIUMTYPE", - 125: "SYS_ECANCELED", - 126: "SYS_ENOKEY", - 127: "SYS_EKEYEXPIRED", - 128: "SYS_EKEYREVOKED", - 129: "SYS_EKEYREJECTED", - 130: "SYS_EOWNERDEAD", - 131: "SYS_ENOTRECOVERABLE", - 132: "SYS_ERFKILL", -} -var RemoteSocketServiceError_SystemError_value = map[string]int32{ - "SYS_SUCCESS": 0, - "SYS_EPERM": 1, - "SYS_ENOENT": 2, - "SYS_ESRCH": 3, - "SYS_EINTR": 4, - "SYS_EIO": 5, 
- "SYS_ENXIO": 6, - "SYS_E2BIG": 7, - "SYS_ENOEXEC": 8, - "SYS_EBADF": 9, - "SYS_ECHILD": 10, - "SYS_EAGAIN": 11, - "SYS_EWOULDBLOCK": 11, - "SYS_ENOMEM": 12, - "SYS_EACCES": 13, - "SYS_EFAULT": 14, - "SYS_ENOTBLK": 15, - "SYS_EBUSY": 16, - "SYS_EEXIST": 17, - "SYS_EXDEV": 18, - "SYS_ENODEV": 19, - "SYS_ENOTDIR": 20, - "SYS_EISDIR": 21, - "SYS_EINVAL": 22, - "SYS_ENFILE": 23, - "SYS_EMFILE": 24, - "SYS_ENOTTY": 25, - "SYS_ETXTBSY": 26, - "SYS_EFBIG": 27, - "SYS_ENOSPC": 28, - "SYS_ESPIPE": 29, - "SYS_EROFS": 30, - "SYS_EMLINK": 31, - "SYS_EPIPE": 32, - "SYS_EDOM": 33, - "SYS_ERANGE": 34, - "SYS_EDEADLK": 35, - "SYS_EDEADLOCK": 35, - "SYS_ENAMETOOLONG": 36, - "SYS_ENOLCK": 37, - "SYS_ENOSYS": 38, - "SYS_ENOTEMPTY": 39, - "SYS_ELOOP": 40, - "SYS_ENOMSG": 42, - "SYS_EIDRM": 43, - "SYS_ECHRNG": 44, - "SYS_EL2NSYNC": 45, - "SYS_EL3HLT": 46, - "SYS_EL3RST": 47, - "SYS_ELNRNG": 48, - "SYS_EUNATCH": 49, - "SYS_ENOCSI": 50, - "SYS_EL2HLT": 51, - "SYS_EBADE": 52, - "SYS_EBADR": 53, - "SYS_EXFULL": 54, - "SYS_ENOANO": 55, - "SYS_EBADRQC": 56, - "SYS_EBADSLT": 57, - "SYS_EBFONT": 59, - "SYS_ENOSTR": 60, - "SYS_ENODATA": 61, - "SYS_ETIME": 62, - "SYS_ENOSR": 63, - "SYS_ENONET": 64, - "SYS_ENOPKG": 65, - "SYS_EREMOTE": 66, - "SYS_ENOLINK": 67, - "SYS_EADV": 68, - "SYS_ESRMNT": 69, - "SYS_ECOMM": 70, - "SYS_EPROTO": 71, - "SYS_EMULTIHOP": 72, - "SYS_EDOTDOT": 73, - "SYS_EBADMSG": 74, - "SYS_EOVERFLOW": 75, - "SYS_ENOTUNIQ": 76, - "SYS_EBADFD": 77, - "SYS_EREMCHG": 78, - "SYS_ELIBACC": 79, - "SYS_ELIBBAD": 80, - "SYS_ELIBSCN": 81, - "SYS_ELIBMAX": 82, - "SYS_ELIBEXEC": 83, - "SYS_EILSEQ": 84, - "SYS_ERESTART": 85, - "SYS_ESTRPIPE": 86, - "SYS_EUSERS": 87, - "SYS_ENOTSOCK": 88, - "SYS_EDESTADDRREQ": 89, - "SYS_EMSGSIZE": 90, - "SYS_EPROTOTYPE": 91, - "SYS_ENOPROTOOPT": 92, - "SYS_EPROTONOSUPPORT": 93, - "SYS_ESOCKTNOSUPPORT": 94, - "SYS_EOPNOTSUPP": 95, - "SYS_ENOTSUP": 95, - "SYS_EPFNOSUPPORT": 96, - "SYS_EAFNOSUPPORT": 97, - "SYS_EADDRINUSE": 98, - "SYS_EADDRNOTAVAIL": 99, - "SYS_ENETDOWN": 100, - "SYS_ENETUNREACH": 101, - "SYS_ENETRESET": 102, - "SYS_ECONNABORTED": 103, - "SYS_ECONNRESET": 104, - "SYS_ENOBUFS": 105, - "SYS_EISCONN": 106, - "SYS_ENOTCONN": 107, - "SYS_ESHUTDOWN": 108, - "SYS_ETOOMANYREFS": 109, - "SYS_ETIMEDOUT": 110, - "SYS_ECONNREFUSED": 111, - "SYS_EHOSTDOWN": 112, - "SYS_EHOSTUNREACH": 113, - "SYS_EALREADY": 114, - "SYS_EINPROGRESS": 115, - "SYS_ESTALE": 116, - "SYS_EUCLEAN": 117, - "SYS_ENOTNAM": 118, - "SYS_ENAVAIL": 119, - "SYS_EISNAM": 120, - "SYS_EREMOTEIO": 121, - "SYS_EDQUOT": 122, - "SYS_ENOMEDIUM": 123, - "SYS_EMEDIUMTYPE": 124, - "SYS_ECANCELED": 125, - "SYS_ENOKEY": 126, - "SYS_EKEYEXPIRED": 127, - "SYS_EKEYREVOKED": 128, - "SYS_EKEYREJECTED": 129, - "SYS_EOWNERDEAD": 130, - "SYS_ENOTRECOVERABLE": 131, - "SYS_ERFKILL": 132, -} - -func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { - p := new(RemoteSocketServiceError_SystemError) - *p = x - return p -} -func (x RemoteSocketServiceError_SystemError) String() string { - return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) -} -func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") - if err != nil { - return err - } - *x = RemoteSocketServiceError_SystemError(value) - return nil -} -func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} -} - -type CreateSocketRequest_SocketFamily int32 - -const ( - CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 - CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 -) - -var CreateSocketRequest_SocketFamily_name = map[int32]string{ - 1: "IPv4", - 2: "IPv6", -} -var CreateSocketRequest_SocketFamily_value = map[string]int32{ - "IPv4": 1, - "IPv6": 2, -} - -func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { - p := new(CreateSocketRequest_SocketFamily) - *p = x - return p -} -func (x CreateSocketRequest_SocketFamily) String() string { - return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) -} -func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketFamily(value) - return nil -} -func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} -} - -type CreateSocketRequest_SocketProtocol int32 - -const ( - CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 - CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 -) - -var CreateSocketRequest_SocketProtocol_name = map[int32]string{ - 1: "TCP", - 2: "UDP", -} -var CreateSocketRequest_SocketProtocol_value = map[string]int32{ - "TCP": 1, - "UDP": 2, -} - -func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { - p := new(CreateSocketRequest_SocketProtocol) - *p = x - return p -} -func (x CreateSocketRequest_SocketProtocol) String() string { - return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) -} -func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketProtocol(value) - return nil -} -func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} -} - -type SocketOption_SocketOptionLevel int32 - -const ( - SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 - SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 - SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 - SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 -) - -var SocketOption_SocketOptionLevel_name = map[int32]string{ - 0: "SOCKET_SOL_IP", - 1: "SOCKET_SOL_SOCKET", - 6: "SOCKET_SOL_TCP", - 17: "SOCKET_SOL_UDP", -} -var SocketOption_SocketOptionLevel_value = map[string]int32{ - "SOCKET_SOL_IP": 0, - "SOCKET_SOL_SOCKET": 1, - "SOCKET_SOL_TCP": 6, - "SOCKET_SOL_UDP": 17, -} - -func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { - p := new(SocketOption_SocketOptionLevel) - *p = x - return p -} -func (x SocketOption_SocketOptionLevel) String() string { - return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) -} -func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") - if err != nil { - return err - } - *x = SocketOption_SocketOptionLevel(value) - return nil -} -func 
(SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} -} - -type SocketOption_SocketOptionName int32 - -const ( - SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 - SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 - SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 - SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 - SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 -) - -var SocketOption_SocketOptionName_name = map[int32]string{ - 1: "SOCKET_SO_DEBUG", - 2: "SOCKET_SO_REUSEADDR", - 3: "SOCKET_SO_TYPE", - 4: "SOCKET_SO_ERROR", - 5: "SOCKET_SO_DONTROUTE", - 6: "SOCKET_SO_BROADCAST", - 7: "SOCKET_SO_SNDBUF", - 8: "SOCKET_SO_RCVBUF", - 9: "SOCKET_SO_KEEPALIVE", - 10: "SOCKET_SO_OOBINLINE", - 13: "SOCKET_SO_LINGER", - 20: "SOCKET_SO_RCVTIMEO", - 21: "SOCKET_SO_SNDTIMEO", - // Duplicate value: 1: "SOCKET_IP_TOS", - // Duplicate value: 2: "SOCKET_IP_TTL", - // Duplicate value: 3: "SOCKET_IP_HDRINCL", - // Duplicate value: 4: "SOCKET_IP_OPTIONS", - // Duplicate value: 1: "SOCKET_TCP_NODELAY", - // Duplicate value: 2: "SOCKET_TCP_MAXSEG", - // Duplicate value: 3: "SOCKET_TCP_CORK", - // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", - // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", - // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", - // Duplicate value: 7: "SOCKET_TCP_SYNCNT", - // Duplicate value: 8: "SOCKET_TCP_LINGER2", - // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", - // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", - 11: "SOCKET_TCP_INFO", - 12: "SOCKET_TCP_QUICKACK", -} -var SocketOption_SocketOptionName_value = map[string]int32{ - "SOCKET_SO_DEBUG": 1, - "SOCKET_SO_REUSEADDR": 2, - "SOCKET_SO_TYPE": 3, - "SOCKET_SO_ERROR": 4, - "SOCKET_SO_DONTROUTE": 5, - "SOCKET_SO_BROADCAST": 6, - "SOCKET_SO_SNDBUF": 7, - "SOCKET_SO_RCVBUF": 8, - 
"SOCKET_SO_KEEPALIVE": 9, - "SOCKET_SO_OOBINLINE": 10, - "SOCKET_SO_LINGER": 13, - "SOCKET_SO_RCVTIMEO": 20, - "SOCKET_SO_SNDTIMEO": 21, - "SOCKET_IP_TOS": 1, - "SOCKET_IP_TTL": 2, - "SOCKET_IP_HDRINCL": 3, - "SOCKET_IP_OPTIONS": 4, - "SOCKET_TCP_NODELAY": 1, - "SOCKET_TCP_MAXSEG": 2, - "SOCKET_TCP_CORK": 3, - "SOCKET_TCP_KEEPIDLE": 4, - "SOCKET_TCP_KEEPINTVL": 5, - "SOCKET_TCP_KEEPCNT": 6, - "SOCKET_TCP_SYNCNT": 7, - "SOCKET_TCP_LINGER2": 8, - "SOCKET_TCP_DEFER_ACCEPT": 9, - "SOCKET_TCP_WINDOW_CLAMP": 10, - "SOCKET_TCP_INFO": 11, - "SOCKET_TCP_QUICKACK": 12, -} - -func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { - p := new(SocketOption_SocketOptionName) - *p = x - return p -} -func (x SocketOption_SocketOptionName) String() string { - return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) -} -func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") - if err != nil { - return err - } - *x = SocketOption_SocketOptionName(value) - return nil -} -func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} -} - -type ShutDownRequest_How int32 - -const ( - ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 - ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 - ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 -) - -var ShutDownRequest_How_name = map[int32]string{ - 1: "SOCKET_SHUT_RD", - 2: "SOCKET_SHUT_WR", - 3: "SOCKET_SHUT_RDWR", -} -var ShutDownRequest_How_value = map[string]int32{ - "SOCKET_SHUT_RD": 1, - "SOCKET_SHUT_WR": 2, - "SOCKET_SHUT_RDWR": 3, -} - -func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { - p := new(ShutDownRequest_How) - *p = x - return p -} -func (x ShutDownRequest_How) String() string { - return proto.EnumName(ShutDownRequest_How_name, int32(x)) -} -func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") - if err != nil { - return err - } - *x = ShutDownRequest_How(value) - return nil -} -func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} -} - -type ReceiveRequest_Flags int32 - -const ( - ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 - ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 -) - -var ReceiveRequest_Flags_name = map[int32]string{ - 1: "MSG_OOB", - 2: "MSG_PEEK", -} -var ReceiveRequest_Flags_value = map[string]int32{ - "MSG_OOB": 1, - "MSG_PEEK": 2, -} - -func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { - p := new(ReceiveRequest_Flags) - *p = x - return p -} -func (x ReceiveRequest_Flags) String() string { - return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) -} -func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") - if err != nil { - return err - } - *x = ReceiveRequest_Flags(value) - return nil -} -func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} -} - -type PollEvent_PollEventFlag int32 - -const ( - PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 - PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 - PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 - PollEvent_SOCKET_POLLOUT 
PollEvent_PollEventFlag = 4 - PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 - PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 - PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 - PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 - PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 - PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 - PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 - PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 - PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 - PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 -) - -var PollEvent_PollEventFlag_name = map[int32]string{ - 0: "SOCKET_POLLNONE", - 1: "SOCKET_POLLIN", - 2: "SOCKET_POLLPRI", - 4: "SOCKET_POLLOUT", - 8: "SOCKET_POLLERR", - 16: "SOCKET_POLLHUP", - 32: "SOCKET_POLLNVAL", - 64: "SOCKET_POLLRDNORM", - 128: "SOCKET_POLLRDBAND", - 256: "SOCKET_POLLWRNORM", - 512: "SOCKET_POLLWRBAND", - 1024: "SOCKET_POLLMSG", - 4096: "SOCKET_POLLREMOVE", - 8192: "SOCKET_POLLRDHUP", -} -var PollEvent_PollEventFlag_value = map[string]int32{ - "SOCKET_POLLNONE": 0, - "SOCKET_POLLIN": 1, - "SOCKET_POLLPRI": 2, - "SOCKET_POLLOUT": 4, - "SOCKET_POLLERR": 8, - "SOCKET_POLLHUP": 16, - "SOCKET_POLLNVAL": 32, - "SOCKET_POLLRDNORM": 64, - "SOCKET_POLLRDBAND": 128, - "SOCKET_POLLWRNORM": 256, - "SOCKET_POLLWRBAND": 512, - "SOCKET_POLLMSG": 1024, - "SOCKET_POLLREMOVE": 4096, - "SOCKET_POLLRDHUP": 8192, -} - -func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { - p := new(PollEvent_PollEventFlag) - *p = x - return p -} -func (x PollEvent_PollEventFlag) String() string { - return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) -} -func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") - if err != nil { - return err - } - *x = PollEvent_PollEventFlag(value) - return nil -} -func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} -} - -type ResolveReply_ErrorCode int32 - -const ( - ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 - ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 - ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 - ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 - ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 - ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 - ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 - ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 - ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 - ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 - ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 - ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 - ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 - ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 - ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 -) - -var ResolveReply_ErrorCode_name = map[int32]string{ - 1: "SOCKET_EAI_ADDRFAMILY", - 2: "SOCKET_EAI_AGAIN", - 3: "SOCKET_EAI_BADFLAGS", - 4: "SOCKET_EAI_FAIL", - 5: "SOCKET_EAI_FAMILY", - 6: "SOCKET_EAI_MEMORY", - 7: "SOCKET_EAI_NODATA", - 8: "SOCKET_EAI_NONAME", - 9: "SOCKET_EAI_SERVICE", - 10: "SOCKET_EAI_SOCKTYPE", - 11: "SOCKET_EAI_SYSTEM", - 12: "SOCKET_EAI_BADHINTS", - 13: "SOCKET_EAI_PROTOCOL", - 14: "SOCKET_EAI_OVERFLOW", - 15: "SOCKET_EAI_MAX", -} -var 
ResolveReply_ErrorCode_value = map[string]int32{ - "SOCKET_EAI_ADDRFAMILY": 1, - "SOCKET_EAI_AGAIN": 2, - "SOCKET_EAI_BADFLAGS": 3, - "SOCKET_EAI_FAIL": 4, - "SOCKET_EAI_FAMILY": 5, - "SOCKET_EAI_MEMORY": 6, - "SOCKET_EAI_NODATA": 7, - "SOCKET_EAI_NONAME": 8, - "SOCKET_EAI_SERVICE": 9, - "SOCKET_EAI_SOCKTYPE": 10, - "SOCKET_EAI_SYSTEM": 11, - "SOCKET_EAI_BADHINTS": 12, - "SOCKET_EAI_PROTOCOL": 13, - "SOCKET_EAI_OVERFLOW": 14, - "SOCKET_EAI_MAX": 15, -} - -func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { - p := new(ResolveReply_ErrorCode) - *p = x - return p -} -func (x ResolveReply_ErrorCode) String() string { - return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) -} -func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") - if err != nil { - return err - } - *x = ResolveReply_ErrorCode(value) - return nil -} -func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} -} - -type RemoteSocketServiceError struct { - SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` - ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } -func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } -func (*RemoteSocketServiceError) ProtoMessage() {} -func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} -} -func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) -} -func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) -} -func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) -} -func (m *RemoteSocketServiceError) XXX_Size() int { - return xxx_messageInfo_RemoteSocketServiceError.Size(m) -} -func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo - -const Default_RemoteSocketServiceError_SystemError int32 = 0 - -func (m *RemoteSocketServiceError) GetSystemError() int32 { - if m != nil && m.SystemError != nil { - return *m.SystemError - } - return Default_RemoteSocketServiceError_SystemError -} - -func (m *RemoteSocketServiceError) GetErrorDetail() string { - if m != nil && m.ErrorDetail != nil { - return *m.ErrorDetail - } - return "" -} - -type AddressPort struct { - Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` - PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddressPort) Reset() { *m = AddressPort{} } -func (m *AddressPort) String() string { 
return proto.CompactTextString(m) } -func (*AddressPort) ProtoMessage() {} -func (*AddressPort) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} -} -func (m *AddressPort) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddressPort.Unmarshal(m, b) -} -func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) -} -func (dst *AddressPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressPort.Merge(dst, src) -} -func (m *AddressPort) XXX_Size() int { - return xxx_messageInfo_AddressPort.Size(m) -} -func (m *AddressPort) XXX_DiscardUnknown() { - xxx_messageInfo_AddressPort.DiscardUnknown(m) -} - -var xxx_messageInfo_AddressPort proto.InternalMessageInfo - -func (m *AddressPort) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return 0 -} - -func (m *AddressPort) GetPackedAddress() []byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *AddressPort) GetHostnameHint() string { - if m != nil && m.HostnameHint != nil { - return *m.HostnameHint - } - return "" -} - -type CreateSocketRequest struct { - Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` - Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` - SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` - ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } -func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSocketRequest) ProtoMessage() {} -func (*CreateSocketRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} -} -func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) -} -func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketRequest.Merge(dst, src) -} -func (m *CreateSocketRequest) XXX_Size() int { - return xxx_messageInfo_CreateSocketRequest.Size(m) -} -func (m *CreateSocketRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo - -const Default_CreateSocketRequest_ListenBacklog int32 = 0 - -func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { - if m != nil && m.Family != nil { - return 
*m.Family - } - return CreateSocketRequest_IPv4 -} - -func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return CreateSocketRequest_TCP -} - -func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { - if m != nil { - return m.SocketOptions - } - return nil -} - -func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -func (m *CreateSocketRequest) GetListenBacklog() int32 { - if m != nil && m.ListenBacklog != nil { - return *m.ListenBacklog - } - return Default_CreateSocketRequest_ListenBacklog -} - -func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *CreateSocketRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CreateSocketRequest) GetProjectId() int64 { - if m != nil && m.ProjectId != nil { - return *m.ProjectId - } - return 0 -} - -type CreateSocketReply struct { - SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } -func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } -func (*CreateSocketReply) ProtoMessage() {} -func (*CreateSocketReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} -} - -var extRange_CreateSocketReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_CreateSocketReply -} -func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) -} -func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) -} -func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketReply.Merge(dst, src) -} -func (m *CreateSocketReply) XXX_Size() int { - return xxx_messageInfo_CreateSocketReply.Size(m) -} -func (m *CreateSocketReply) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo - -func (m *CreateSocketReply) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CreateSocketReply) GetServerAddress() *AddressPort { - if m != nil { - return m.ServerAddress - } - return nil -} - -func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" 
json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindRequest) Reset() { *m = BindRequest{} } -func (m *BindRequest) String() string { return proto.CompactTextString(m) } -func (*BindRequest) ProtoMessage() {} -func (*BindRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} -} -func (m *BindRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindRequest.Unmarshal(m, b) -} -func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) -} -func (dst *BindRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindRequest.Merge(dst, src) -} -func (m *BindRequest) XXX_Size() int { - return xxx_messageInfo_BindRequest.Size(m) -} -func (m *BindRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BindRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BindRequest proto.InternalMessageInfo - -func (m *BindRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *BindRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindReply) Reset() { *m = BindReply{} } -func (m *BindReply) String() string { return proto.CompactTextString(m) } -func (*BindReply) ProtoMessage() {} -func (*BindReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} -} -func (m *BindReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindReply.Unmarshal(m, b) -} -func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) -} -func (dst *BindReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindReply.Merge(dst, src) -} -func (m *BindReply) XXX_Size() int { - return xxx_messageInfo_BindReply.Size(m) -} -func (m *BindReply) XXX_DiscardUnknown() { - xxx_messageInfo_BindReply.DiscardUnknown(m) -} - -var xxx_messageInfo_BindReply proto.InternalMessageInfo - -func (m *BindReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetSocketNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } -func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameRequest) ProtoMessage() {} -func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} -} -func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) -} -func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) -} -func (dst 
*GetSocketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) -} -func (m *GetSocketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketNameRequest.Size(m) -} -func (m *GetSocketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo - -func (m *GetSocketNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetSocketNameReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } -func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameReply) ProtoMessage() {} -func (*GetSocketNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} -} -func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) -} -func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameReply.Merge(dst, src) -} -func (m *GetSocketNameReply) XXX_Size() int { - return xxx_messageInfo_GetSocketNameReply.Size(m) -} -func (m *GetSocketNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo - -func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetPeerNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } -func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameRequest) ProtoMessage() {} -func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} -} -func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) -} -func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) -} -func (m *GetPeerNameRequest) XXX_Size() int { - return xxx_messageInfo_GetPeerNameRequest.Size(m) -} -func (m *GetPeerNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo - -func (m *GetPeerNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetPeerNameReply struct { - PeerIp *AddressPort 
`protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } -func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameReply) ProtoMessage() {} -func (*GetPeerNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} -} -func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) -} -func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameReply.Merge(dst, src) -} -func (m *GetPeerNameReply) XXX_Size() int { - return xxx_messageInfo_GetPeerNameReply.Size(m) -} -func (m *GetPeerNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo - -func (m *GetPeerNameReply) GetPeerIp() *AddressPort { - if m != nil { - return m.PeerIp - } - return nil -} - -type SocketOption struct { - Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` - Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SocketOption) Reset() { *m = SocketOption{} } -func (m *SocketOption) String() string { return proto.CompactTextString(m) } -func (*SocketOption) ProtoMessage() {} -func (*SocketOption) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} -} -func (m *SocketOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SocketOption.Unmarshal(m, b) -} -func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) -} -func (dst *SocketOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_SocketOption.Merge(dst, src) -} -func (m *SocketOption) XXX_Size() int { - return xxx_messageInfo_SocketOption.Size(m) -} -func (m *SocketOption) XXX_DiscardUnknown() { - xxx_messageInfo_SocketOption.DiscardUnknown(m) -} - -var xxx_messageInfo_SocketOption proto.InternalMessageInfo - -func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { - if m != nil && m.Level != nil { - return *m.Level - } - return SocketOption_SOCKET_SOL_IP -} - -func (m *SocketOption) GetOption() SocketOption_SocketOptionName { - if m != nil && m.Option != nil { - return *m.Option - } - return SocketOption_SOCKET_SO_DEBUG -} - -func (m *SocketOption) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type SetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } -func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsRequest) ProtoMessage() {} -func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} -} -func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) -} -func (m *SetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsRequest.Size(m) -} -func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo - -func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type SetSocketOptionsReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } -func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsReply) ProtoMessage() {} -func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} -} -func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) -} -func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) -} -func (m *SetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsReply.Size(m) -} -func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo - -type GetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } -func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsRequest) ProtoMessage() {} -func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} -} -func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { - return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) -} -func (m *GetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsRequest.Size(m) -} -func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo - -func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type GetSocketOptionsReply struct { - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } -func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsReply) ProtoMessage() {} -func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} -} -func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) -} -func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) -} -func (m *GetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsReply.Size(m) -} -func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo - -func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type ConnectRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) -} -func (dst *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(dst, src) -} -func (m *ConnectRequest) XXX_Size() int { - return xxx_messageInfo_ConnectRequest.Size(m) -} 
-func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -const Default_ConnectRequest_TimeoutSeconds float64 = -1 - -func (m *ConnectRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ConnectRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *ConnectRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ConnectRequest_TimeoutSeconds -} - -type ConnectReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectReply) Reset() { *m = ConnectReply{} } -func (m *ConnectReply) String() string { return proto.CompactTextString(m) } -func (*ConnectReply) ProtoMessage() {} -func (*ConnectReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} -} - -var extRange_ConnectReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ConnectReply -} -func (m *ConnectReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectReply.Unmarshal(m, b) -} -func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) -} -func (dst *ConnectReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectReply.Merge(dst, src) -} -func (m *ConnectReply) XXX_Size() int { - return xxx_messageInfo_ConnectReply.Size(m) -} -func (m *ConnectReply) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectReply proto.InternalMessageInfo - -func (m *ConnectReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type ListenRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenRequest) Reset() { *m = ListenRequest{} } -func (m *ListenRequest) String() string { return proto.CompactTextString(m) } -func (*ListenRequest) ProtoMessage() {} -func (*ListenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} -} -func (m *ListenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenRequest.Unmarshal(m, b) -} -func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) -} -func (dst *ListenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenRequest.Merge(dst, src) -} -func (m *ListenRequest) XXX_Size() int { - return xxx_messageInfo_ListenRequest.Size(m) -} -func (m *ListenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenRequest proto.InternalMessageInfo - -func (m 
*ListenRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ListenRequest) GetBacklog() int32 { - if m != nil && m.Backlog != nil { - return *m.Backlog - } - return 0 -} - -type ListenReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenReply) Reset() { *m = ListenReply{} } -func (m *ListenReply) String() string { return proto.CompactTextString(m) } -func (*ListenReply) ProtoMessage() {} -func (*ListenReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} -} -func (m *ListenReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenReply.Unmarshal(m, b) -} -func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) -} -func (dst *ListenReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenReply.Merge(dst, src) -} -func (m *ListenReply) XXX_Size() int { - return xxx_messageInfo_ListenReply.Size(m) -} -func (m *ListenReply) XXX_DiscardUnknown() { - xxx_messageInfo_ListenReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenReply proto.InternalMessageInfo - -type AcceptRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } -func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } -func (*AcceptRequest) ProtoMessage() {} -func (*AcceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} -} -func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) -} -func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) -} -func (dst *AcceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptRequest.Merge(dst, src) -} -func (m *AcceptRequest) XXX_Size() int { - return xxx_messageInfo_AcceptRequest.Size(m) -} -func (m *AcceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo - -const Default_AcceptRequest_TimeoutSeconds float64 = -1 - -func (m *AcceptRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *AcceptRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_AcceptRequest_TimeoutSeconds -} - -type AcceptReply struct { - NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` - RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptReply) Reset() { *m = AcceptReply{} } -func (m *AcceptReply) 
String() string { return proto.CompactTextString(m) } -func (*AcceptReply) ProtoMessage() {} -func (*AcceptReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} -} -func (m *AcceptReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptReply.Unmarshal(m, b) -} -func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) -} -func (dst *AcceptReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptReply.Merge(dst, src) -} -func (m *AcceptReply) XXX_Size() int { - return xxx_messageInfo_AcceptReply.Size(m) -} -func (m *AcceptReply) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptReply.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptReply proto.InternalMessageInfo - -func (m *AcceptReply) GetNewSocketDescriptor() []byte { - if m != nil { - return m.NewSocketDescriptor - } - return nil -} - -func (m *AcceptReply) GetRemoteAddress() *AddressPort { - if m != nil { - return m.RemoteAddress - } - return nil -} - -type ShutDownRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` - SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } -func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } -func (*ShutDownRequest) ProtoMessage() {} -func (*ShutDownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} -} -func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) -} -func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) -} -func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownRequest.Merge(dst, src) -} -func (m *ShutDownRequest) XXX_Size() int { - return xxx_messageInfo_ShutDownRequest.Size(m) -} -func (m *ShutDownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo - -func (m *ShutDownRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ShutDownRequest) GetHow() ShutDownRequest_How { - if m != nil && m.How != nil { - return *m.How - } - return ShutDownRequest_SOCKET_SHUT_RD -} - -func (m *ShutDownRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return 0 -} - -type ShutDownReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } -func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } -func (*ShutDownReply) ProtoMessage() {} -func (*ShutDownReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} -} -func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownReply.Unmarshal(m, 
b) -} -func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) -} -func (dst *ShutDownReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownReply.Merge(dst, src) -} -func (m *ShutDownReply) XXX_Size() int { - return xxx_messageInfo_ShutDownReply.Size(m) -} -func (m *ShutDownReply) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo - -type CloseRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseRequest) Reset() { *m = CloseRequest{} } -func (m *CloseRequest) String() string { return proto.CompactTextString(m) } -func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} -} -func (m *CloseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseRequest.Unmarshal(m, b) -} -func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) -} -func (dst *CloseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseRequest.Merge(dst, src) -} -func (m *CloseRequest) XXX_Size() int { - return xxx_messageInfo_CloseRequest.Size(m) -} -func (m *CloseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseRequest proto.InternalMessageInfo - -const Default_CloseRequest_SendOffset int64 = -1 - -func (m *CloseRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CloseRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return Default_CloseRequest_SendOffset -} - -type CloseReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseReply) Reset() { *m = CloseReply{} } -func (m *CloseReply) String() string { return proto.CompactTextString(m) } -func (*CloseReply) ProtoMessage() {} -func (*CloseReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} -} -func (m *CloseReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseReply.Unmarshal(m, b) -} -func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) -} -func (dst *CloseReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseReply.Merge(dst, src) -} -func (m *CloseReply) XXX_Size() int { - return xxx_messageInfo_CloseReply.Size(m) -} -func (m *CloseReply) XXX_DiscardUnknown() { - xxx_messageInfo_CloseReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseReply proto.InternalMessageInfo - -type SendRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` - StreamOffset *int64 
`protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` - SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendRequest) Reset() { *m = SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} -} -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (dst *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(dst, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -const Default_SendRequest_Flags int32 = 0 -const Default_SendRequest_TimeoutSeconds float64 = -1 - -func (m *SendRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SendRequest) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *SendRequest) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *SendRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_SendRequest_Flags -} - -func (m *SendRequest) GetSendTo() *AddressPort { - if m != nil { - return m.SendTo - } - return nil -} - -func (m *SendRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_SendRequest_TimeoutSeconds -} - -type SendReply struct { - DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendReply) Reset() { *m = SendReply{} } -func (m *SendReply) String() string { return proto.CompactTextString(m) } -func (*SendReply) ProtoMessage() {} -func (*SendReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} -} -func (m *SendReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendReply.Unmarshal(m, b) -} -func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) -} -func (dst *SendReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendReply.Merge(dst, src) -} -func (m *SendReply) XXX_Size() int { - return xxx_messageInfo_SendReply.Size(m) -} -func (m *SendReply) XXX_DiscardUnknown() { - xxx_messageInfo_SendReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SendReply proto.InternalMessageInfo - -func (m *SendReply) GetDataSent() int32 { - if m != nil && m.DataSent != nil { - return *m.DataSent - 
} - return 0 -} - -type ReceiveRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` - Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } -func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } -func (*ReceiveRequest) ProtoMessage() {} -func (*ReceiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} -} -func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) -} -func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) -} -func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveRequest.Merge(dst, src) -} -func (m *ReceiveRequest) XXX_Size() int { - return xxx_messageInfo_ReceiveRequest.Size(m) -} -func (m *ReceiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo - -const Default_ReceiveRequest_Flags int32 = 0 -const Default_ReceiveRequest_TimeoutSeconds float64 = -1 - -func (m *ReceiveRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ReceiveRequest) GetDataSize() int32 { - if m != nil && m.DataSize != nil { - return *m.DataSize - } - return 0 -} - -func (m *ReceiveRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_ReceiveRequest_Flags -} - -func (m *ReceiveRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ReceiveRequest_TimeoutSeconds -} - -type ReceiveReply struct { - StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` - BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } -func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } -func (*ReceiveReply) ProtoMessage() {} -func (*ReceiveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} -} -func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) -} -func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) -} -func (dst *ReceiveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveReply.Merge(dst, src) -} -func (m *ReceiveReply) XXX_Size() int { - 
return xxx_messageInfo_ReceiveReply.Size(m) -} -func (m *ReceiveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo - -func (m *ReceiveReply) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *ReceiveReply) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ReceiveReply) GetReceivedFrom() *AddressPort { - if m != nil { - return m.ReceivedFrom - } - return nil -} - -func (m *ReceiveReply) GetBufferSize() int32 { - if m != nil && m.BufferSize != nil { - return *m.BufferSize - } - return 0 -} - -type PollEvent struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` - ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollEvent) Reset() { *m = PollEvent{} } -func (m *PollEvent) String() string { return proto.CompactTextString(m) } -func (*PollEvent) ProtoMessage() {} -func (*PollEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} -} -func (m *PollEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollEvent.Unmarshal(m, b) -} -func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) -} -func (dst *PollEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollEvent.Merge(dst, src) -} -func (m *PollEvent) XXX_Size() int { - return xxx_messageInfo_PollEvent.Size(m) -} -func (m *PollEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PollEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PollEvent proto.InternalMessageInfo - -func (m *PollEvent) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *PollEvent) GetRequestedEvents() int32 { - if m != nil && m.RequestedEvents != nil { - return *m.RequestedEvents - } - return 0 -} - -func (m *PollEvent) GetObservedEvents() int32 { - if m != nil && m.ObservedEvents != nil { - return *m.ObservedEvents - } - return 0 -} - -type PollRequest struct { - Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollRequest) Reset() { *m = PollRequest{} } -func (m *PollRequest) String() string { return proto.CompactTextString(m) } -func (*PollRequest) ProtoMessage() {} -func (*PollRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} -} -func (m *PollRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollRequest.Unmarshal(m, b) -} -func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) -} -func (dst *PollRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollRequest.Merge(dst, src) -} -func (m 
*PollRequest) XXX_Size() int { - return xxx_messageInfo_PollRequest.Size(m) -} -func (m *PollRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PollRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PollRequest proto.InternalMessageInfo - -const Default_PollRequest_TimeoutSeconds float64 = -1 - -func (m *PollRequest) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -func (m *PollRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_PollRequest_TimeoutSeconds -} - -type PollReply struct { - Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollReply) Reset() { *m = PollReply{} } -func (m *PollReply) String() string { return proto.CompactTextString(m) } -func (*PollReply) ProtoMessage() {} -func (*PollReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} -} -func (m *PollReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollReply.Unmarshal(m, b) -} -func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) -} -func (dst *PollReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollReply.Merge(dst, src) -} -func (m *PollReply) XXX_Size() int { - return xxx_messageInfo_PollReply.Size(m) -} -func (m *PollReply) XXX_DiscardUnknown() { - xxx_messageInfo_PollReply.DiscardUnknown(m) -} - -var xxx_messageInfo_PollReply proto.InternalMessageInfo - -func (m *PollReply) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -type ResolveRequest struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } -func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveRequest) ProtoMessage() {} -func (*ResolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} -} -func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) -} -func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) -} -func (dst *ResolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveRequest.Merge(dst, src) -} -func (m *ResolveRequest) XXX_Size() int { - return xxx_messageInfo_ResolveRequest.Size(m) -} -func (m *ResolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo - -func (m *ResolveRequest) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { - if m != nil { - return m.AddressFamilies - } - return nil -} - -type ResolveReply struct { - PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" 
json:"packed_address,omitempty"` - CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` - Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveReply) Reset() { *m = ResolveReply{} } -func (m *ResolveReply) String() string { return proto.CompactTextString(m) } -func (*ResolveReply) ProtoMessage() {} -func (*ResolveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} -} -func (m *ResolveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveReply.Unmarshal(m, b) -} -func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) -} -func (dst *ResolveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveReply.Merge(dst, src) -} -func (m *ResolveReply) XXX_Size() int { - return xxx_messageInfo_ResolveReply.Size(m) -} -func (m *ResolveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveReply proto.InternalMessageInfo - -func (m *ResolveReply) GetPackedAddress() [][]byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *ResolveReply) GetCanonicalName() string { - if m != nil && m.CanonicalName != nil { - return *m.CanonicalName - } - return "" -} - -func (m *ResolveReply) GetAliases() []string { - if m != nil { - return m.Aliases - } - return nil -} - -func init() { - proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") - proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") - proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") - proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") - proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") - proto.RegisterType((*BindReply)(nil), "appengine.BindReply") - proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") - proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") - proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") - proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") - proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") - proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") - proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") - proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") - proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") - proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") - proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") - proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") - proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") - proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") - proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") - proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") - proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") - proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") - proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") 
- proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") - proto.RegisterType((*SendReply)(nil), "appengine.SendReply") - proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") - proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") - proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") - proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") - proto.RegisterType((*PollReply)(nil), "appengine.PollReply") - proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") - proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) -} - -var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ - // 3088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, - 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, - 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, - 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, - 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, - 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, - 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, - 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, - 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, - 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, - 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, - 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, - 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, - 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, - 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, - 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, - 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, - 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, - 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, - 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, - 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, - 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, - 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, - 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, - 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, - 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, - 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, - 0x41, 0x5d, 0xd1, 0x4d, 
0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, - 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, - 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, - 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, - 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, - 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, - 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, - 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, - 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, - 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, - 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, - 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, - 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, - 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, - 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, - 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, - 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, - 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, - 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, - 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, - 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, - 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, - 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, - 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, - 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, - 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, - 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, - 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, - 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, - 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, - 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, - 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, - 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, - 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, - 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, - 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, - 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 
0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, - 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, - 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, - 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, - 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, - 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, - 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, - 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, - 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, - 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, - 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, - 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, - 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, - 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, - 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, - 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, - 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, - 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, - 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, - 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, - 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, - 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, - 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, - 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, - 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, - 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, - 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, - 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, - 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, - 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, - 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, - 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, - 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, - 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, - 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, - 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, - 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 
0x91, 0xff, 0xf7, 0x23, - 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, - 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, - 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, - 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, - 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, - 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, - 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, - 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, - 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, - 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, - 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, - 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, - 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, - 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, - 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, - 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, - 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, - 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, - 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, - 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, - 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, - 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, - 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, - 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, - 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, - 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, - 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, - 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, - 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, - 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, - 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, - 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, - 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, - 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, - 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, - 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, - 
0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, - 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, - 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, - 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, - 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, - 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, - 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, - 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, - 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, - 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, - 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, - 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, - 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, - 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, - 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, - 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, - 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, - 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, - 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, - 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, - 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, - 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, - 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, - 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, - 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, - 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, - 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, - 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, - 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, - 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, - 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, - 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, - 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, - 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, - 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, - 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, - 0x52, 0x37, 0x24, 0x34, 
0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, - 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, - 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, - 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, - 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, - 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, - 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, - 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, - 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, - 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, - 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, - 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, - 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, - 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, - 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, - 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, - 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, - 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, - 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, - 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, - 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto deleted file mode 100644 index 2fcc7953dc..0000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +++ /dev/null @@ -1,460 +0,0 @@ -syntax = "proto2"; -option go_package = "socket"; - -package appengine; - -message RemoteSocketServiceError { - enum ErrorCode { - SYSTEM_ERROR = 1; - GAI_ERROR = 2; - FAILURE = 4; - PERMISSION_DENIED = 5; - INVALID_REQUEST = 6; - SOCKET_CLOSED = 7; - } - - enum SystemError { - option allow_alias = true; - - SYS_SUCCESS = 0; - SYS_EPERM = 1; - SYS_ENOENT = 2; - SYS_ESRCH = 3; - SYS_EINTR = 4; - SYS_EIO = 5; - SYS_ENXIO = 6; - SYS_E2BIG = 7; - SYS_ENOEXEC = 8; - SYS_EBADF = 9; - SYS_ECHILD = 10; - SYS_EAGAIN = 11; - SYS_EWOULDBLOCK = 11; - SYS_ENOMEM = 12; - SYS_EACCES = 13; - SYS_EFAULT = 14; - SYS_ENOTBLK = 15; - SYS_EBUSY = 16; - SYS_EEXIST = 17; - SYS_EXDEV = 18; - SYS_ENODEV = 19; - SYS_ENOTDIR = 20; - SYS_EISDIR = 21; - SYS_EINVAL = 22; - SYS_ENFILE = 23; - SYS_EMFILE = 24; - SYS_ENOTTY = 25; - SYS_ETXTBSY = 26; - SYS_EFBIG = 27; - SYS_ENOSPC = 28; - SYS_ESPIPE = 29; - SYS_EROFS = 30; - SYS_EMLINK = 31; - SYS_EPIPE = 32; - SYS_EDOM = 33; - SYS_ERANGE = 34; - SYS_EDEADLK = 35; - SYS_EDEADLOCK = 35; - SYS_ENAMETOOLONG = 36; - SYS_ENOLCK = 37; - SYS_ENOSYS = 38; - SYS_ENOTEMPTY = 39; - SYS_ELOOP = 40; - SYS_ENOMSG = 42; - SYS_EIDRM = 43; - SYS_ECHRNG = 44; - SYS_EL2NSYNC = 
45; - SYS_EL3HLT = 46; - SYS_EL3RST = 47; - SYS_ELNRNG = 48; - SYS_EUNATCH = 49; - SYS_ENOCSI = 50; - SYS_EL2HLT = 51; - SYS_EBADE = 52; - SYS_EBADR = 53; - SYS_EXFULL = 54; - SYS_ENOANO = 55; - SYS_EBADRQC = 56; - SYS_EBADSLT = 57; - SYS_EBFONT = 59; - SYS_ENOSTR = 60; - SYS_ENODATA = 61; - SYS_ETIME = 62; - SYS_ENOSR = 63; - SYS_ENONET = 64; - SYS_ENOPKG = 65; - SYS_EREMOTE = 66; - SYS_ENOLINK = 67; - SYS_EADV = 68; - SYS_ESRMNT = 69; - SYS_ECOMM = 70; - SYS_EPROTO = 71; - SYS_EMULTIHOP = 72; - SYS_EDOTDOT = 73; - SYS_EBADMSG = 74; - SYS_EOVERFLOW = 75; - SYS_ENOTUNIQ = 76; - SYS_EBADFD = 77; - SYS_EREMCHG = 78; - SYS_ELIBACC = 79; - SYS_ELIBBAD = 80; - SYS_ELIBSCN = 81; - SYS_ELIBMAX = 82; - SYS_ELIBEXEC = 83; - SYS_EILSEQ = 84; - SYS_ERESTART = 85; - SYS_ESTRPIPE = 86; - SYS_EUSERS = 87; - SYS_ENOTSOCK = 88; - SYS_EDESTADDRREQ = 89; - SYS_EMSGSIZE = 90; - SYS_EPROTOTYPE = 91; - SYS_ENOPROTOOPT = 92; - SYS_EPROTONOSUPPORT = 93; - SYS_ESOCKTNOSUPPORT = 94; - SYS_EOPNOTSUPP = 95; - SYS_ENOTSUP = 95; - SYS_EPFNOSUPPORT = 96; - SYS_EAFNOSUPPORT = 97; - SYS_EADDRINUSE = 98; - SYS_EADDRNOTAVAIL = 99; - SYS_ENETDOWN = 100; - SYS_ENETUNREACH = 101; - SYS_ENETRESET = 102; - SYS_ECONNABORTED = 103; - SYS_ECONNRESET = 104; - SYS_ENOBUFS = 105; - SYS_EISCONN = 106; - SYS_ENOTCONN = 107; - SYS_ESHUTDOWN = 108; - SYS_ETOOMANYREFS = 109; - SYS_ETIMEDOUT = 110; - SYS_ECONNREFUSED = 111; - SYS_EHOSTDOWN = 112; - SYS_EHOSTUNREACH = 113; - SYS_EALREADY = 114; - SYS_EINPROGRESS = 115; - SYS_ESTALE = 116; - SYS_EUCLEAN = 117; - SYS_ENOTNAM = 118; - SYS_ENAVAIL = 119; - SYS_EISNAM = 120; - SYS_EREMOTEIO = 121; - SYS_EDQUOT = 122; - SYS_ENOMEDIUM = 123; - SYS_EMEDIUMTYPE = 124; - SYS_ECANCELED = 125; - SYS_ENOKEY = 126; - SYS_EKEYEXPIRED = 127; - SYS_EKEYREVOKED = 128; - SYS_EKEYREJECTED = 129; - SYS_EOWNERDEAD = 130; - SYS_ENOTRECOVERABLE = 131; - SYS_ERFKILL = 132; - } - - optional int32 system_error = 1 [default=0]; - optional string error_detail = 2; -} - -message AddressPort { - required int32 port = 1; - optional bytes packed_address = 2; - - optional string hostname_hint = 3; -} - - - -message CreateSocketRequest { - enum SocketFamily { - IPv4 = 1; - IPv6 = 2; - } - - enum SocketProtocol { - TCP = 1; - UDP = 2; - } - - required SocketFamily family = 1; - required SocketProtocol protocol = 2; - - repeated SocketOption socket_options = 3; - - optional AddressPort proxy_external_ip = 4; - - optional int32 listen_backlog = 5 [default=0]; - - optional AddressPort remote_ip = 6; - - optional string app_id = 9; - - optional int64 project_id = 10; -} - -message CreateSocketReply { - optional string socket_descriptor = 1; - - optional AddressPort server_address = 3; - - optional AddressPort proxy_external_ip = 4; - - extensions 1000 to max; -} - - - -message BindRequest { - required string socket_descriptor = 1; - required AddressPort proxy_external_ip = 2; -} - -message BindReply { - optional AddressPort proxy_external_ip = 1; -} - - - -message GetSocketNameRequest { - required string socket_descriptor = 1; -} - -message GetSocketNameReply { - optional AddressPort proxy_external_ip = 2; -} - - - -message GetPeerNameRequest { - required string socket_descriptor = 1; -} - -message GetPeerNameReply { - optional AddressPort peer_ip = 2; -} - - -message SocketOption { - - enum SocketOptionLevel { - SOCKET_SOL_IP = 0; - SOCKET_SOL_SOCKET = 1; - SOCKET_SOL_TCP = 6; - SOCKET_SOL_UDP = 17; - } - - enum SocketOptionName { - option allow_alias = true; - - SOCKET_SO_DEBUG = 1; - SOCKET_SO_REUSEADDR = 2; - SOCKET_SO_TYPE = 
3; - SOCKET_SO_ERROR = 4; - SOCKET_SO_DONTROUTE = 5; - SOCKET_SO_BROADCAST = 6; - SOCKET_SO_SNDBUF = 7; - SOCKET_SO_RCVBUF = 8; - SOCKET_SO_KEEPALIVE = 9; - SOCKET_SO_OOBINLINE = 10; - SOCKET_SO_LINGER = 13; - SOCKET_SO_RCVTIMEO = 20; - SOCKET_SO_SNDTIMEO = 21; - - SOCKET_IP_TOS = 1; - SOCKET_IP_TTL = 2; - SOCKET_IP_HDRINCL = 3; - SOCKET_IP_OPTIONS = 4; - - SOCKET_TCP_NODELAY = 1; - SOCKET_TCP_MAXSEG = 2; - SOCKET_TCP_CORK = 3; - SOCKET_TCP_KEEPIDLE = 4; - SOCKET_TCP_KEEPINTVL = 5; - SOCKET_TCP_KEEPCNT = 6; - SOCKET_TCP_SYNCNT = 7; - SOCKET_TCP_LINGER2 = 8; - SOCKET_TCP_DEFER_ACCEPT = 9; - SOCKET_TCP_WINDOW_CLAMP = 10; - SOCKET_TCP_INFO = 11; - SOCKET_TCP_QUICKACK = 12; - } - - required SocketOptionLevel level = 1; - required SocketOptionName option = 2; - required bytes value = 3; -} - - -message SetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message SetSocketOptionsReply { -} - -message GetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message GetSocketOptionsReply { - repeated SocketOption options = 2; -} - - -message ConnectRequest { - required string socket_descriptor = 1; - required AddressPort remote_ip = 2; - optional double timeout_seconds = 3 [default=-1]; -} - -message ConnectReply { - optional AddressPort proxy_external_ip = 1; - - extensions 1000 to max; -} - - -message ListenRequest { - required string socket_descriptor = 1; - required int32 backlog = 2; -} - -message ListenReply { -} - - -message AcceptRequest { - required string socket_descriptor = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message AcceptReply { - optional bytes new_socket_descriptor = 2; - optional AddressPort remote_address = 3; -} - - - -message ShutDownRequest { - enum How { - SOCKET_SHUT_RD = 1; - SOCKET_SHUT_WR = 2; - SOCKET_SHUT_RDWR = 3; - } - required string socket_descriptor = 1; - required How how = 2; - required int64 send_offset = 3; -} - -message ShutDownReply { -} - - - -message CloseRequest { - required string socket_descriptor = 1; - optional int64 send_offset = 2 [default=-1]; -} - -message CloseReply { -} - - - -message SendRequest { - required string socket_descriptor = 1; - required bytes data = 2 [ctype=CORD]; - required int64 stream_offset = 3; - optional int32 flags = 4 [default=0]; - optional AddressPort send_to = 5; - optional double timeout_seconds = 6 [default=-1]; -} - -message SendReply { - optional int32 data_sent = 1; -} - - -message ReceiveRequest { - enum Flags { - MSG_OOB = 1; - MSG_PEEK = 2; - } - required string socket_descriptor = 1; - required int32 data_size = 2; - optional int32 flags = 3 [default=0]; - optional double timeout_seconds = 5 [default=-1]; -} - -message ReceiveReply { - optional int64 stream_offset = 2; - optional bytes data = 3 [ctype=CORD]; - optional AddressPort received_from = 4; - optional int32 buffer_size = 5; -} - - - -message PollEvent { - - enum PollEventFlag { - SOCKET_POLLNONE = 0; - SOCKET_POLLIN = 1; - SOCKET_POLLPRI = 2; - SOCKET_POLLOUT = 4; - SOCKET_POLLERR = 8; - SOCKET_POLLHUP = 16; - SOCKET_POLLNVAL = 32; - SOCKET_POLLRDNORM = 64; - SOCKET_POLLRDBAND = 128; - SOCKET_POLLWRNORM = 256; - SOCKET_POLLWRBAND = 512; - SOCKET_POLLMSG = 1024; - SOCKET_POLLREMOVE = 4096; - SOCKET_POLLRDHUP = 8192; - }; - - required string socket_descriptor = 1; - required int32 requested_events = 2; - required int32 observed_events = 3; -} - -message PollRequest { - repeated PollEvent events = 1; - optional double 
timeout_seconds = 2 [default=-1]; -} - -message PollReply { - repeated PollEvent events = 2; -} - -message ResolveRequest { - required string name = 1; - repeated CreateSocketRequest.SocketFamily address_families = 2; -} - -message ResolveReply { - enum ErrorCode { - SOCKET_EAI_ADDRFAMILY = 1; - SOCKET_EAI_AGAIN = 2; - SOCKET_EAI_BADFLAGS = 3; - SOCKET_EAI_FAIL = 4; - SOCKET_EAI_FAMILY = 5; - SOCKET_EAI_MEMORY = 6; - SOCKET_EAI_NODATA = 7; - SOCKET_EAI_NONAME = 8; - SOCKET_EAI_SERVICE = 9; - SOCKET_EAI_SOCKTYPE = 10; - SOCKET_EAI_SYSTEM = 11; - SOCKET_EAI_BADHINTS = 12; - SOCKET_EAI_PROTOCOL = 13; - SOCKET_EAI_OVERFLOW = 14; - SOCKET_EAI_MAX = 15; - }; - - repeated bytes packed_address = 2; - optional string canonical_name = 3; - repeated string aliases = 4; -} diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go deleted file mode 100644 index 3de46df826..0000000000 --- a/vendor/google.golang.org/appengine/socket/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package socket provides outbound network sockets. -// -// This package is only required in the classic App Engine environment. -// Applications running only in App Engine "flexible environment" should -// use the standard library's net package. -package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go deleted file mode 100644 index 0ad50e2d36..0000000000 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package socket - -import ( - "fmt" - "io" - "net" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" - - pb "google.golang.org/appengine/internal/socket" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - return DialTimeout(ctx, protocol, addr, 0) -} - -var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ - pb.CreateSocketRequest_IPv4, - pb.CreateSocketRequest_IPv6, -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. 
- if timeout > 0 { - var cancel context.CancelFunc - dialCtx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) - } - - var prot pb.CreateSocketRequest_SocketProtocol - switch protocol { - case "tcp": - prot = pb.CreateSocketRequest_TCP - case "udp": - prot = pb.CreateSocketRequest_UDP - default: - return nil, fmt.Errorf("socket: unknown protocol %q", protocol) - } - - packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - if len(packedAddrs) == 0 { - return nil, fmt.Errorf("no addresses for %q", host) - } - - packedAddr := packedAddrs[0] // use first address - fam := pb.CreateSocketRequest_IPv4 - if len(packedAddr) == net.IPv6len { - fam = pb.CreateSocketRequest_IPv6 - } - - req := &pb.CreateSocketRequest{ - Family: fam.Enum(), - Protocol: prot.Enum(), - RemoteIp: &pb.AddressPort{ - Port: proto.Int32(int32(port)), - PackedAddress: packedAddr, - }, - } - if resolved { - req.RemoteIp.HostnameHint = &host - } - res := &pb.CreateSocketReply{} - if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { - return nil, err - } - - return &Conn{ - ctx: ctx, - desc: res.GetSocketDescriptor(), - prot: prot, - local: res.ProxyExternalIp, - remote: req.RemoteIp, - }, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - packedAddrs, _, err := resolve(ctx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - addrs = make([]net.IP, len(packedAddrs)) - for i, pa := range packedAddrs { - addrs[i] = net.IP(pa) - } - return addrs, nil -} - -func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { - // Check if it's an IP address. - if ip := net.ParseIP(host); ip != nil { - if ip := ip.To4(); ip != nil { - return [][]byte{ip}, false, nil - } - return [][]byte{ip}, false, nil - } - - req := &pb.ResolveRequest{ - Name: &host, - AddressFamilies: fams, - } - res := &pb.ResolveReply{} - if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { - // XXX: need to map to pb.ResolveReply_ErrorCode? - return nil, false, err - } - return res.PackedAddress, true, nil -} - -// withDeadline is like context.WithDeadline, except it ignores the zero deadline. -func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { - if deadline.IsZero() { - return parent, func() {} - } - return context.WithDeadline(parent, deadline) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - ctx context.Context - desc string - offset int64 - - prot pb.CreateSocketRequest_SocketProtocol - local, remote *pb.AddressPort - - readDeadline, writeDeadline time.Time // optional -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. 
-func (cn *Conn) SetContext(ctx context.Context) { - cn.ctx = ctx -} - -func (cn *Conn) Read(b []byte) (n int, err error) { - const maxRead = 1 << 20 - if len(b) > maxRead { - b = b[:maxRead] - } - - req := &pb.ReceiveRequest{ - SocketDescriptor: &cn.desc, - DataSize: proto.Int32(int32(len(b))), - } - res := &pb.ReceiveReply{} - if !cn.readDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) - defer cancel() - if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { - return 0, err - } - if len(res.Data) == 0 { - return 0, io.EOF - } - if len(res.Data) > len(b) { - return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) - } - return copy(b, res.Data), nil -} - -func (cn *Conn) Write(b []byte) (n int, err error) { - const lim = 1 << 20 // max per chunk - - for n < len(b) { - chunk := b[n:] - if len(chunk) > lim { - chunk = chunk[:lim] - } - - req := &pb.SendRequest{ - SocketDescriptor: &cn.desc, - Data: chunk, - StreamOffset: &cn.offset, - } - res := &pb.SendReply{} - if !cn.writeDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) - defer cancel() - if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { - // assume zero bytes were sent in this RPC - break - } - n += int(res.GetDataSent()) - cn.offset += int64(res.GetDataSent()) - } - - return -} - -func (cn *Conn) Close() error { - req := &pb.CloseRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.CloseReply{} - if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { - return err - } - cn.desc = "CLOSED" - return nil -} - -func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { - if ap == nil { - return nil - } - switch prot { - case pb.CreateSocketRequest_TCP: - return &net.TCPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - case pb.CreateSocketRequest_UDP: - return &net.UDPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - } - panic("unknown protocol " + prot.String()) -} - -func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } -func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } - -func (cn *Conn) SetDeadline(t time.Time) error { - cn.readDeadline = t - cn.writeDeadline = t - return nil -} - -func (cn *Conn) SetReadDeadline(t time.Time) error { - cn.readDeadline = t - return nil -} - -func (cn *Conn) SetWriteDeadline(t time.Time) error { - cn.writeDeadline = t - return nil -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - req := &pb.GetSocketNameRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.GetSocketNameReply{} - return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go deleted file mode 100644 index c804169a1c..0000000000 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package socket - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - conn, err := net.DialTimeout(protocol, addr, timeout) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - return net.LookupIP(host) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - net.Conn -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - // This function is not required in App Engine "flexible environment". -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - // This function is not required in App Engine "flexible environment". - return nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go deleted file mode 100644 index 55111d110c..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/annotations.proto - -package annotations - -import ( - reflect "reflect" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", - Filename: "google/api/annotations.proto", - }, -} - -// Extension fields to descriptorpb.MethodOptions. -var ( - // See `HttpRule`. - // - // optional google.api.HttpRule http = 72295728; - E_Http = &file_google_api_annotations_proto_extTypes[0] -) - -var File_google_api_annotations_proto protoreflect.FileDescriptor - -var file_google_api_annotations_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, - 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_google_api_annotations_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*HttpRule)(nil), // 1: google.api.HttpRule -} -var file_google_api_annotations_proto_depIdxs = []int32{ - 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.http:type_name -> google.api.HttpRule - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_annotations_proto_init() } -func file_google_api_annotations_proto_init() { - if 
File_google_api_annotations_proto != nil { - return - } - file_google_api_http_proto_init() - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_annotations_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_annotations_proto_goTypes, - DependencyIndexes: file_google_api_annotations_proto_depIdxs, - ExtensionInfos: file_google_api_annotations_proto_extTypes, - }.Build() - File_google_api_annotations_proto = out.File - file_google_api_annotations_proto_rawDesc = nil - file_google_api_annotations_proto_goTypes = nil - file_google_api_annotations_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go deleted file mode 100644 index e4324641d6..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/client.proto - -package annotations - -import ( - reflect "reflect" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: ([]string)(nil), - Field: 1051, - Name: "google.api.method_signature", - Tag: "bytes,1051,rep,name=method_signature", - Filename: "google/api/client.proto", - }, - { - ExtendedType: (*descriptorpb.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1049, - Name: "google.api.default_host", - Tag: "bytes,1049,opt,name=default_host", - Filename: "google/api/client.proto", - }, - { - ExtendedType: (*descriptorpb.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1050, - Name: "google.api.oauth_scopes", - Tag: "bytes,1050,opt,name=oauth_scopes", - Filename: "google/api/client.proto", - }, -} - -// Extension fields to descriptorpb.MethodOptions. -var ( - // A definition of a client library method signature. 
- // - // In client libraries, each proto RPC corresponds to one or more methods - // which the end user is able to call, and calls the underlying RPC. - // Normally, this method receives a single argument (a struct or instance - // corresponding to the RPC request object). Defining this field will - // add one or more overloads providing flattened or simpler method signatures - // in some languages. - // - // The fields on the method signature are provided as a comma-separated - // string. - // - // For example, the proto RPC and annotation: - // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } - // - // Would add the following Java overload (in addition to the method accepting - // the request object): - // - // public final Subscription createSubscription(String name, String topic) - // - // The following backwards-compatibility guidelines apply: - // - // * Adding this annotation to an unannotated method is backwards - // compatible. - // * Adding this annotation to a method which already has existing - // method signature annotations is backwards compatible if and only if - // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is - // a breaking change. - // * Re-ordering existing method signature annotations is a breaking - // change. - // - // repeated string method_signature = 1051; - E_MethodSignature = &file_google_api_client_proto_extTypes[0] -) - -// Extension fields to descriptorpb.ServiceOptions. -var ( - // The hostname for this service. - // This should be specified with no prefix or protocol. - // - // Example: - // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } - // - // optional string default_host = 1049; - E_DefaultHost = &file_google_api_client_proto_extTypes[1] - // OAuth scopes needed for the client. - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } - // - // If there is more than one scope, use a comma-separated string: - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... 
- // } - // - // optional string oauth_scopes = 1050; - E_OauthScopes = &file_google_api_client_proto_extTypes[2] -) - -var File_google_api_client_proto protoreflect.FileDescriptor - -var file_google_api_client_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions -} -var file_google_api_client_proto_depIdxs = []int32{ - 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 0, // [0:3] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_client_proto_init() } -func file_google_api_client_proto_init() 
{ - if File_google_api_client_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 3, - NumServices: 0, - }, - GoTypes: file_google_api_client_proto_goTypes, - DependencyIndexes: file_google_api_client_proto_depIdxs, - ExtensionInfos: file_google_api_client_proto_extTypes, - }.Build() - File_google_api_client_proto = out.File - file_google_api_client_proto_rawDesc = nil - file_google_api_client_proto_goTypes = nil - file_google_api_client_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go deleted file mode 100644 index 91ad80ecc8..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/field_behavior.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). -// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -type FieldBehavior int32 - -const ( - // Conventional default for enums. Do not use this. - FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 - // Specifically denotes a field as optional. - // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - FieldBehavior_OPTIONAL FieldBehavior = 1 - // Denotes a field as required. - // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - FieldBehavior_REQUIRED FieldBehavior = 2 - // Denotes a field as output only. 
- // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - FieldBehavior_INPUT_ONLY FieldBehavior = 4 - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. - FieldBehavior_IMMUTABLE FieldBehavior = 5 - // Denotes that a (repeated) field is an unordered list. - // This indicates that the service may provide the elements of the list - // in any arbitrary order, rather than the order the user originally - // provided. Additionally, the list's order may or may not be stable. - FieldBehavior_UNORDERED_LIST FieldBehavior = 6 -) - -// Enum value maps for FieldBehavior. -var ( - FieldBehavior_name = map[int32]string{ - 0: "FIELD_BEHAVIOR_UNSPECIFIED", - 1: "OPTIONAL", - 2: "REQUIRED", - 3: "OUTPUT_ONLY", - 4: "INPUT_ONLY", - 5: "IMMUTABLE", - 6: "UNORDERED_LIST", - } - FieldBehavior_value = map[string]int32{ - "FIELD_BEHAVIOR_UNSPECIFIED": 0, - "OPTIONAL": 1, - "REQUIRED": 2, - "OUTPUT_ONLY": 3, - "INPUT_ONLY": 4, - "IMMUTABLE": 5, - "UNORDERED_LIST": 6, - } -) - -func (x FieldBehavior) Enum() *FieldBehavior { - p := new(FieldBehavior) - *p = x - return p -} - -func (x FieldBehavior) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() -} - -func (FieldBehavior) Type() protoreflect.EnumType { - return &file_google_api_field_behavior_proto_enumTypes[0] -} - -func (x FieldBehavior) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FieldBehavior.Descriptor instead. -func (FieldBehavior) EnumDescriptor() ([]byte, []int) { - return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} -} - -var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: ([]FieldBehavior)(nil), - Field: 1052, - Name: "google.api.field_behavior", - Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", - Filename: "google/api/field_behavior.proto", - }, -} - -// Extension fields to descriptorpb.FieldOptions. -var ( - // A designation of a specific field behavior (required, output only, etc.) - // in protobuf messages. 
- // - // Examples: - // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; - // - // repeated google.api.FieldBehavior field_behavior = 1052; - E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] -) - -var File_google_api_field_behavior_proto protoreflect.FileDescriptor - -var file_google_api_field_behavior_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0x8f, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, - 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, - 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, - 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, - 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, - 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, - 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, - 0x06, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_field_behavior_proto_rawDescOnce sync.Once - file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc 
-) - -func file_google_api_field_behavior_proto_rawDescGZIP() []byte { - file_google_api_field_behavior_proto_rawDescOnce.Do(func() { - file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) - }) - return file_google_api_field_behavior_proto_rawDescData -} - -var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_behavior_proto_goTypes = []interface{}{ - (FieldBehavior)(0), // 0: google.api.FieldBehavior - (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions -} -var file_google_api_field_behavior_proto_depIdxs = []int32{ - 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions - 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_field_behavior_proto_init() } -func file_google_api_field_behavior_proto_init() { - if File_google_api_field_behavior_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_field_behavior_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_field_behavior_proto_goTypes, - DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, - EnumInfos: file_google_api_field_behavior_proto_enumTypes, - ExtensionInfos: file_google_api_field_behavior_proto_extTypes, - }.Build() - File_google_api_field_behavior_proto = out.File - file_google_api_field_behavior_proto_rawDesc = nil - file_google_api_field_behavior_proto_goTypes = nil - file_google_api_field_behavior_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go deleted file mode 100644 index f36d981ced..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ /dev/null @@ -1,783 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/http.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -type Http struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` -} - -func (x *Http) Reset() { - *x = Http{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http) ProtoMessage() {} - -func (x *Http) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http.ProtoReflect.Descriptor instead. -func (*Http) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{0} -} - -func (x *Http) GetRules() []*HttpRule { - if x != nil { - return x.Rules - } - return nil -} - -func (x *Http) GetFullyDecodeReservedExpansion() bool { - if x != nil { - return x.FullyDecodeReservedExpansion - } - return false -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. 
-// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A¶m=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" 
}` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. 
-// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -type HttpRule struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. 
- // - // Types that are assignable to Pattern: - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` -} - -func (x *HttpRule) Reset() { - *x = HttpRule{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpRule) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpRule) ProtoMessage() {} - -func (x *HttpRule) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. 
-func (*HttpRule) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{1} -} - -func (x *HttpRule) GetSelector() string { - if x != nil { - return x.Selector - } - return "" -} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (x *HttpRule) GetGet() string { - if x, ok := x.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (x *HttpRule) GetPut() string { - if x, ok := x.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return "" -} - -func (x *HttpRule) GetPost() string { - if x, ok := x.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (x *HttpRule) GetDelete() string { - if x, ok := x.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - return "" -} - -func (x *HttpRule) GetPatch() string { - if x, ok := x.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (x *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := x.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (x *HttpRule) GetBody() string { - if x != nil { - return x.Body - } - return "" -} - -func (x *HttpRule) GetResponseBody() string { - if x != nil { - return x.ResponseBody - } - return "" -} - -func (x *HttpRule) GetAdditionalBindings() []*HttpRule { - if x != nil { - return x.AdditionalBindings - } - return nil -} - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` -} - -type HttpRule_Put struct { - // Maps to HTTP PUT. Used for replacing a resource. - Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` -} - -type HttpRule_Post struct { - // Maps to HTTP POST. Used for creating a resource or performing an action. - Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` -} - -type HttpRule_Delete struct { - // Maps to HTTP DELETE. Used for deleting a resource. - Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` -} - -type HttpRule_Patch struct { - // Maps to HTTP PATCH. Used for updating a resource. - Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` -} - -type HttpRule_Custom struct { - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} - -func (*HttpRule_Put) isHttpRule_Pattern() {} - -func (*HttpRule_Post) isHttpRule_Pattern() {} - -func (*HttpRule_Delete) isHttpRule_Pattern() {} - -func (*HttpRule_Patch) isHttpRule_Pattern() {} - -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` - // The path matched by this custom verb. 
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *CustomHttpPattern) Reset() { - *x = CustomHttpPattern{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomHttpPattern) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomHttpPattern) ProtoMessage() {} - -func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{2} -} - -func (x *CustomHttpPattern) GetKind() string { - if x != nil { - return x.Kind - } - return "" -} - -func (x *CustomHttpPattern) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -var File_google_api_http_proto protoreflect.FileDescriptor - -var file_google_api_http_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, - 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, - 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, - 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, - 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, - 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, - 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x12, 
0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, - 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_http_proto_rawDescOnce sync.Once - file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc -) - -func file_google_api_http_proto_rawDescGZIP() []byte { - file_google_api_http_proto_rawDescOnce.Do(func() { - file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) - }) - return file_google_api_http_proto_rawDescData -} - -var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_api_http_proto_goTypes = []interface{}{ - (*Http)(nil), // 0: google.api.Http - (*HttpRule)(nil), // 1: google.api.HttpRule - (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern -} -var file_google_api_http_proto_depIdxs = []int32{ - 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule - 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern - 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_google_api_http_proto_init() } -func file_google_api_http_proto_init() { - if File_google_api_http_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpRule); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomHttpPattern); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_http_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_http_proto_goTypes, - DependencyIndexes: file_google_api_http_proto_depIdxs, - MessageInfos: file_google_api_http_proto_msgTypes, - }.Build() - File_google_api_http_proto = out.File - file_google_api_http_proto_rawDesc = nil - file_google_api_http_proto_goTypes = nil - file_google_api_http_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go deleted file mode 100644 index 8a422a66b3..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ /dev/null @@ -1,721 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/resource.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// A description of the historical or future-looking state of the -// resource pattern. -type ResourceDescriptor_History int32 - -const ( - // The "unset" value. 
- ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 - // The resource originally had one pattern and launched as such, and - // additional patterns were added later. - ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 - // The resource has one pattern, but the API owner expects to add more - // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents - // that from being necessary once there are multiple patterns.) - ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 -) - -// Enum value maps for ResourceDescriptor_History. -var ( - ResourceDescriptor_History_name = map[int32]string{ - 0: "HISTORY_UNSPECIFIED", - 1: "ORIGINALLY_SINGLE_PATTERN", - 2: "FUTURE_MULTI_PATTERN", - } - ResourceDescriptor_History_value = map[string]int32{ - "HISTORY_UNSPECIFIED": 0, - "ORIGINALLY_SINGLE_PATTERN": 1, - "FUTURE_MULTI_PATTERN": 2, - } -) - -func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { - p := new(ResourceDescriptor_History) - *p = x - return p -} - -func (x ResourceDescriptor_History) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_resource_proto_enumTypes[0].Descriptor() -} - -func (ResourceDescriptor_History) Type() protoreflect.EnumType { - return &file_google_api_resource_proto_enumTypes[0] -} - -func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ResourceDescriptor_History.Descriptor instead. -func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} -} - -// A flag representing a specific style that a resource claims to conform to. -type ResourceDescriptor_Style int32 - -const ( - // The unspecified value. Do not use. - ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0 - // This resource is intended to be "declarative-friendly". - // - // Declarative-friendly resources must be more strictly consistent, and - // setting this to true communicates to tools that this resource should - // adhere to declarative-friendly expectations. - // - // Note: This is used by the API linter (linter.aip.dev) to enable - // additional checks. - ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1 -) - -// Enum value maps for ResourceDescriptor_Style. -var ( - ResourceDescriptor_Style_name = map[int32]string{ - 0: "STYLE_UNSPECIFIED", - 1: "DECLARATIVE_FRIENDLY", - } - ResourceDescriptor_Style_value = map[string]int32{ - "STYLE_UNSPECIFIED": 0, - "DECLARATIVE_FRIENDLY": 1, - } -) - -func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style { - p := new(ResourceDescriptor_Style) - *p = x - return p -} - -func (x ResourceDescriptor_Style) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_resource_proto_enumTypes[1].Descriptor() -} - -func (ResourceDescriptor_Style) Type() protoreflect.EnumType { - return &file_google_api_resource_proto_enumTypes[1] -} - -func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ResourceDescriptor_Style.Descriptor instead. 
-func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1} -} - -// A simple descriptor of a resource type. -// -// ResourceDescriptor annotates a resource message (either by means of a -// protobuf annotation or use in the service config), and associates the -// resource's schema, the resource type, and the pattern of the resource name. -// -// Example: -// -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// name_descriptor: { -// pattern: "projects/{project}/topics/{topic}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// } -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: "pubsub.googleapis.com/Topic" -// name_descriptor: -// - pattern: "projects/{project}/topics/{topic}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// -// Sometimes, resources have multiple patterns, typically because they can -// live under multiple parents. -// -// Example: -// -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// name_descriptor: { -// pattern: "projects/{project}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// } -// name_descriptor: { -// pattern: "folders/{folder}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -// parent_name_extractor: "folders/{folder}" -// } -// name_descriptor: { -// pattern: "organizations/{organization}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Organization" -// parent_name_extractor: "organizations/{organization}" -// } -// name_descriptor: { -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// parent_type: "billing.googleapis.com/BillingAccount" -// parent_name_extractor: "billingAccounts/{billing_account}" -// } -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// name_descriptor: -// - pattern: "projects/{project}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// - pattern: "folders/{folder}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -// parent_name_extractor: "folders/{folder}" -// - pattern: "organizations/{organization}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Organization" -// parent_name_extractor: "organizations/{organization}" -// - pattern: "billingAccounts/{billing_account}/logs/{log}" -// parent_type: "billing.googleapis.com/BillingAccount" -// parent_name_extractor: "billingAccounts/{billing_account}" -// -// For flexible resources, the resource name doesn't contain parent names, but -// the resource itself has parents for policy evaluation. 
-// -// Example: -// -// message Shelf { -// option (google.api.resource) = { -// type: "library.googleapis.com/Shelf" -// name_descriptor: { -// pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// } -// name_descriptor: { -// pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -// } -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: 'library.googleapis.com/Shelf' -// name_descriptor: -// - pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// - pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -type ResourceDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The resource type. It must be in the format of - // {service_name}/{resource_type_kind}. The `resource_type_kind` must be - // singular and must not include version numbers. - // - // Example: `storage.googleapis.com/Bucket` - // - // The value of the resource_type_kind must follow the regular expression - // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and - // should use PascalCase (UpperCamelCase). The maximum number of - // characters allowed for the `resource_type_kind` is 100. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Optional. The relative resource name pattern associated with this resource - // type. The DNS prefix of the full resource name shouldn't be specified here. - // - // The path pattern must follow the syntax, which aligns with HTTP binding - // syntax: - // - // Template = Segment { "/" Segment } ; - // Segment = LITERAL | Variable ; - // Variable = "{" LITERAL "}" ; - // - // Examples: - // - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" - // - // The components in braces correspond to the IDs for each resource in the - // hierarchy. It is expected that, if multiple patterns are provided, - // the same component name (e.g. "project") refers to IDs of the same - // type of resource. - Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"` - // Optional. The field on the resource that designates the resource name - // field. If omitted, this is assumed to be "name". - NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"` - // Optional. The historical or future-looking state of the resource pattern. - // - // Example: - // - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` - // The plural name used in the resource name and permission names, such as - // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. 
It is the same - // concept of the `plural` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // - // Note: The plural form is required even for singleton resources. See - // https://aip.dev/156 - Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` - // The same concept of the `singular` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // Such as "project" for the `resourcemanager.googleapis.com/Project` type. - Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` - // Style flag(s) for this resource. - // These indicate that a resource is expected to conform to a given - // style. See the specific style flags for additional information. - Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"` -} - -func (x *ResourceDescriptor) Reset() { - *x = ResourceDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_resource_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceDescriptor) ProtoMessage() {} - -func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_api_resource_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. -func (*ResourceDescriptor) Descriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{0} -} - -func (x *ResourceDescriptor) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ResourceDescriptor) GetPattern() []string { - if x != nil { - return x.Pattern - } - return nil -} - -func (x *ResourceDescriptor) GetNameField() string { - if x != nil { - return x.NameField - } - return "" -} - -func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { - if x != nil { - return x.History - } - return ResourceDescriptor_HISTORY_UNSPECIFIED -} - -func (x *ResourceDescriptor) GetPlural() string { - if x != nil { - return x.Plural - } - return "" -} - -func (x *ResourceDescriptor) GetSingular() string { - if x != nil { - return x.Singular - } - return "" -} - -func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style { - if x != nil { - return x.Style - } - return nil -} - -// Defines a proto annotation that describes a string field that refers to -// an API resource. -type ResourceReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The resource type that the annotated field references. - // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } - // - // Occasionally, a field may reference an arbitrary resource. In this case, - // APIs use the special value * in their resource reference. 
- // - // Example: - // - // message GetIamPolicyRequest { - // string resource = 2 [(google.api.resource_reference) = { - // type: "*" - // }]; - // } - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The resource type of a child collection that the annotated field - // references. This is useful for annotating the `parent` field that - // doesn't have a fixed resource type. - // - // Example: - // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` -} - -func (x *ResourceReference) Reset() { - *x = ResourceReference{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_resource_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceReference) ProtoMessage() {} - -func (x *ResourceReference) ProtoReflect() protoreflect.Message { - mi := &file_google_api_resource_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead. -func (*ResourceReference) Descriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{1} -} - -func (x *ResourceReference) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ResourceReference) GetChildType() string { - if x != nil { - return x.ChildType - } - return "" -} - -var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: (*ResourceReference)(nil), - Field: 1055, - Name: "google.api.resource_reference", - Tag: "bytes,1055,opt,name=resource_reference", - Filename: "google/api/resource.proto", - }, - { - ExtendedType: (*descriptorpb.FileOptions)(nil), - ExtensionType: ([]*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource_definition", - Tag: "bytes,1053,rep,name=resource_definition", - Filename: "google/api/resource.proto", - }, - { - ExtendedType: (*descriptorpb.MessageOptions)(nil), - ExtensionType: (*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource", - Tag: "bytes,1053,opt,name=resource", - Filename: "google/api/resource.proto", - }, -} - -// Extension fields to descriptorpb.FieldOptions. -var ( - // An annotation that describes a resource reference, see - // [ResourceReference][]. - // - // optional google.api.ResourceReference resource_reference = 1055; - E_ResourceReference = &file_google_api_resource_proto_extTypes[0] -) - -// Extension fields to descriptorpb.FileOptions. -var ( - // An annotation that describes a resource definition without a corresponding - // message; see [ResourceDescriptor][]. - // - // repeated google.api.ResourceDescriptor resource_definition = 1053; - E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] -) - -// Extension fields to descriptorpb.MessageOptions. -var ( - // An annotation that describes a resource definition, see - // [ResourceDescriptor][]. 
- // - // optional google.api.ResourceDescriptor resource = 1053; - E_Resource = &file_google_api_resource_proto_extTypes[2] -) - -var File_google_api_resource_proto protoreflect.FileDescriptor - -var file_google_api_resource_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, - 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, - 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, - 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, - 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, - 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49, - 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c, - 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, - 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c, - 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05, - 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45, - 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, - 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_api_resource_proto_rawDescOnce sync.Once - file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc -) - -func file_google_api_resource_proto_rawDescGZIP() []byte { - file_google_api_resource_proto_rawDescOnce.Do(func() { - file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) - }) - return file_google_api_resource_proto_rawDescData -} - -var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_api_resource_proto_goTypes = 
[]interface{}{ - (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History - (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style - (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor - (*ResourceReference)(nil), // 3: google.api.ResourceReference - (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions - (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions - (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions -} -var file_google_api_resource_proto_depIdxs = []int32{ - 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History - 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style - 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions - 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions - 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions - 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference - 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor - 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 5, // [5:8] is the sub-list for extension type_name - 2, // [2:5] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_google_api_resource_proto_init() } -func file_google_api_resource_proto_init() { - if File_google_api_resource_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_resource_proto_rawDesc, - NumEnums: 2, - NumMessages: 2, - NumExtensions: 3, - NumServices: 0, - }, - GoTypes: file_google_api_resource_proto_goTypes, - DependencyIndexes: file_google_api_resource_proto_depIdxs, - EnumInfos: file_google_api_resource_proto_enumTypes, - MessageInfos: file_google_api_resource_proto_msgTypes, - ExtensionInfos: file_google_api_resource_proto_extTypes, - }.Build() - File_google_api_resource_proto = out.File - file_google_api_resource_proto_rawDesc = nil - file_google_api_resource_proto_goTypes = nil - file_google_api_resource_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go deleted file mode 100644 index 37f6a4e689..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go +++ /dev/null @@ -1,894 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/distribution.proto - -package distribution - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// `Distribution` contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those values -// across a set of buckets. -// -// The summary statistics are the count, mean, sum of the squared deviation from -// the mean, the minimum, and the maximum of the set of population of values. -// The histogram is based on a sequence of buckets and gives a count of values -// that fall into each bucket. The boundaries of the buckets are given either -// explicitly or by formulas for buckets of fixed or exponentially increasing -// widths. -// -// Although it is not forbidden, it is generally a bad idea to include -// non-finite values (infinities or NaNs) in the population of values, as this -// will render the `mean` and `sum_of_squared_deviation` fields meaningless. -type Distribution struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The number of values in the population. Must be non-negative. This value - // must equal the sum of the values in `bucket_counts` if a histogram is - // provided. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // The arithmetic mean of the values in the population. If `count` is zero - // then this field must be zero. - Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` - // The sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 232, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If `count` is zero then this field must be zero. - SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` - // If specified, contains the range of the population values. The field - // must not be present if the `count` is zero. 
- Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` - // Defines the histogram bucket boundaries. If the distribution does not - // contain a histogram, then omit this field. - BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` - // The number of values in each bucket of the histogram, as described in - // `bucket_options`. If the distribution does not have a histogram, then omit - // this field. If there is a histogram, then the sum of the values in - // `bucket_counts` must equal the value in the `count` field of the - // distribution. - // - // If present, `bucket_counts` should contain N values, where N is the number - // of buckets specified in `bucket_options`. If you supply fewer than N - // values, the remaining values are assumed to be 0. - // - // The order of the values in `bucket_counts` follows the bucket numbering - // schemes described for the three bucket types. The first value must be the - // count for the underflow bucket (number 0). The next N-2 values are the - // counts for the finite buckets (number 1 through N-2). The N'th value in - // `bucket_counts` is the count for the overflow bucket (number N-1). - BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` - // Must be in increasing order of `value` field. - Exemplars []*Distribution_Exemplar `protobuf:"bytes,10,rep,name=exemplars,proto3" json:"exemplars,omitempty"` -} - -func (x *Distribution) Reset() { - *x = Distribution{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution) ProtoMessage() {} - -func (x *Distribution) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution.ProtoReflect.Descriptor instead. -func (*Distribution) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0} -} - -func (x *Distribution) GetCount() int64 { - if x != nil { - return x.Count - } - return 0 -} - -func (x *Distribution) GetMean() float64 { - if x != nil { - return x.Mean - } - return 0 -} - -func (x *Distribution) GetSumOfSquaredDeviation() float64 { - if x != nil { - return x.SumOfSquaredDeviation - } - return 0 -} - -func (x *Distribution) GetRange() *Distribution_Range { - if x != nil { - return x.Range - } - return nil -} - -func (x *Distribution) GetBucketOptions() *Distribution_BucketOptions { - if x != nil { - return x.BucketOptions - } - return nil -} - -func (x *Distribution) GetBucketCounts() []int64 { - if x != nil { - return x.BucketCounts - } - return nil -} - -func (x *Distribution) GetExemplars() []*Distribution_Exemplar { - if x != nil { - return x.Exemplars - } - return nil -} - -// The range of the population values. -type Distribution_Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The minimum of the population values. 
- Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` - // The maximum of the population values. - Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` -} - -func (x *Distribution_Range) Reset() { - *x = Distribution_Range{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution_Range) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution_Range) ProtoMessage() {} - -func (x *Distribution_Range) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution_Range.ProtoReflect.Descriptor instead. -func (*Distribution_Range) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Distribution_Range) GetMin() float64 { - if x != nil { - return x.Min - } - return 0 -} - -func (x *Distribution_Range) GetMax() float64 { - if x != nil { - return x.Max - } - return 0 -} - -// `BucketOptions` describes the bucket boundaries used to create a histogram -// for the distribution. The buckets can be in a linear sequence, an -// exponential sequence, or each bucket can be specified explicitly. -// `BucketOptions` does not include the number of values in each bucket. -// -// A bucket has an inclusive lower bound and exclusive upper bound for the -// values that are counted for that bucket. The upper bound of a bucket must -// be strictly greater than the lower bound. The sequence of N buckets for a -// distribution consists of an underflow bucket (number 0), zero or more -// finite buckets (number 1 through N - 2) and an overflow bucket (number N - -// 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the -// same as the upper bound of bucket i - 1. The buckets span the whole range -// of finite values: lower bound of the underflow bucket is -infinity and the -// upper bound of the overflow bucket is +infinity. The finite buckets are -// so-called because both bounds are finite. -type Distribution_BucketOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Exactly one of these three fields must be set. 
- // - // Types that are assignable to Options: - // *Distribution_BucketOptions_LinearBuckets - // *Distribution_BucketOptions_ExponentialBuckets - // *Distribution_BucketOptions_ExplicitBuckets - Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` -} - -func (x *Distribution_BucketOptions) Reset() { - *x = Distribution_BucketOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution_BucketOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution_BucketOptions) ProtoMessage() {} - -func (x *Distribution_BucketOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution_BucketOptions.ProtoReflect.Descriptor instead. -func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1} -} - -func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { - if m != nil { - return m.Options - } - return nil -} - -func (x *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { - if x, ok := x.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { - return x.LinearBuckets - } - return nil -} - -func (x *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { - if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { - return x.ExponentialBuckets - } - return nil -} - -func (x *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { - if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { - return x.ExplicitBuckets - } - return nil -} - -type isDistribution_BucketOptions_Options interface { - isDistribution_BucketOptions_Options() -} - -type Distribution_BucketOptions_LinearBuckets struct { - // The linear bucket. - LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` -} - -type Distribution_BucketOptions_ExponentialBuckets struct { - // The exponential buckets. - ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` -} - -type Distribution_BucketOptions_ExplicitBuckets struct { - // The explicit buckets. - ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` -} - -func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} - -func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} - -func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} - -// Exemplars are example points that may be used to annotate aggregated -// distribution values. They are metadata that gives information about a -// particular value added to a Distribution bucket, such as a trace ID that -// was active when a value was added. They may contain further information, -// such as a example values and timestamps, origin, etc. 
-type Distribution_Exemplar struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Value of the exemplar point. This value determines to which bucket the - // exemplar belongs. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // The observation (sampling) time of the above value. - Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Contextual information about the example value. Examples are: - // - // Trace: type.googleapis.com/google.monitoring.v3.SpanContext - // - // Literal string: type.googleapis.com/google.protobuf.StringValue - // - // Labels dropped during aggregation: - // type.googleapis.com/google.monitoring.v3.DroppedLabels - // - // There may be only a single attachment of any given message type in a - // single exemplar, and this is enforced by the system. - Attachments []*anypb.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` -} - -func (x *Distribution_Exemplar) Reset() { - *x = Distribution_Exemplar{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution_Exemplar) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution_Exemplar) ProtoMessage() {} - -func (x *Distribution_Exemplar) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution_Exemplar.ProtoReflect.Descriptor instead. -func (*Distribution_Exemplar) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *Distribution_Exemplar) GetValue() float64 { - if x != nil { - return x.Value - } - return 0 -} - -func (x *Distribution_Exemplar) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { - if x != nil { - return x.Attachments - } - return nil -} - -// Specifies a linear sequence of buckets that all have the same width -// (except overflow and underflow). Each bucket represents a constant -// absolute uncertainty on the specific value in the bucket. -// -// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the -// following boundaries: -// -// Upper bound (0 <= i < N-1): offset + (width * i). -// Lower bound (1 <= i < N): offset + (width * (i - 1)). -type Distribution_BucketOptions_Linear struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Must be greater than 0. - NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` - // Must be greater than 0. - Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` - // Lower bound of the first bucket. 
- Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` -} - -func (x *Distribution_BucketOptions_Linear) Reset() { - *x = Distribution_BucketOptions_Linear{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution_BucketOptions_Linear) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution_BucketOptions_Linear) ProtoMessage() {} - -func (x *Distribution_BucketOptions_Linear) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution_BucketOptions_Linear.ProtoReflect.Descriptor instead. -func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 0} -} - -func (x *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { - if x != nil { - return x.NumFiniteBuckets - } - return 0 -} - -func (x *Distribution_BucketOptions_Linear) GetWidth() float64 { - if x != nil { - return x.Width - } - return 0 -} - -func (x *Distribution_BucketOptions_Linear) GetOffset() float64 { - if x != nil { - return x.Offset - } - return 0 -} - -// Specifies an exponential sequence of buckets that have a width that is -// proportional to the value of the lower bound. Each bucket represents a -// constant relative uncertainty on a specific value in the bucket. -// -// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the -// following boundaries: -// -// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). -// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). -type Distribution_BucketOptions_Exponential struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Must be greater than 0. - NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` - // Must be greater than 1. - GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` - // Must be greater than 0. - Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"` -} - -func (x *Distribution_BucketOptions_Exponential) Reset() { - *x = Distribution_BucketOptions_Exponential{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution_BucketOptions_Exponential) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution_BucketOptions_Exponential) ProtoMessage() {} - -func (x *Distribution_BucketOptions_Exponential) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution_BucketOptions_Exponential.ProtoReflect.Descriptor instead. 
-func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 1} -} - -func (x *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 { - if x != nil { - return x.NumFiniteBuckets - } - return 0 -} - -func (x *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 { - if x != nil { - return x.GrowthFactor - } - return 0 -} - -func (x *Distribution_BucketOptions_Exponential) GetScale() float64 { - if x != nil { - return x.Scale - } - return 0 -} - -// Specifies a set of buckets with arbitrary widths. -// -// There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following -// boundaries: -// -// Upper bound (0 <= i < N-1): bounds[i] -// Lower bound (1 <= i < N); bounds[i - 1] -// -// The `bounds` field must contain at least one element. If `bounds` has -// only one element, then there are no finite buckets, and that single -// element is the common boundary of the overflow and underflow buckets. -type Distribution_BucketOptions_Explicit struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The values must be monotonically increasing. - Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` -} - -func (x *Distribution_BucketOptions_Explicit) Reset() { - *x = Distribution_BucketOptions_Explicit{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_distribution_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Distribution_BucketOptions_Explicit) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} - -func (x *Distribution_BucketOptions_Explicit) ProtoReflect() protoreflect.Message { - mi := &file_google_api_distribution_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Distribution_BucketOptions_Explicit.ProtoReflect.Descriptor instead. 
-func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { - return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 2} -} - -func (x *Distribution_BucketOptions_Explicit) GetBounds() []float64 { - if x != nil { - return x.Bounds - } - return nil -} - -var File_google_api_distribution_proto protoreflect.FileDescriptor - -var file_google_api_distribution_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x08, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6d, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x6d, 0x65, - 0x61, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x75, 0x6d, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x71, 0x75, - 0x61, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x75, 0x6d, 0x4f, 0x66, 0x53, 0x71, 0x75, 0x61, 0x72, - 0x65, 0x64, 0x44, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x4d, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x1a, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6d, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, - 0x6d, 0x61, 0x78, 0x1a, 0xb9, 0x04, 0x0a, 0x0d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, - 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x12, 0x56, 0x0a, 0x0e, 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x5f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x48, 0x00, 0x52, 0x0d, - 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x65, 0x0a, - 0x13, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x00, - 0x52, 0x12, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, - 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, - 0x00, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x1a, 0x64, 0x0a, 0x06, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x12, 0x2c, 0x0a, 0x12, - 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, - 0x69, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, - 0x64, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x1a, 0x76, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x66, - 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, - 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x67, 0x72, - 0x6f, 0x77, 0x74, 0x68, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, - 0x1a, 0x22, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x62, 0x6f, - 0x75, 0x6e, 0x64, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, - 0x92, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x42, 0x71, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x11, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_distribution_proto_rawDescOnce sync.Once - file_google_api_distribution_proto_rawDescData = file_google_api_distribution_proto_rawDesc -) - -func file_google_api_distribution_proto_rawDescGZIP() []byte { - file_google_api_distribution_proto_rawDescOnce.Do(func() { - file_google_api_distribution_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_distribution_proto_rawDescData) - }) - return file_google_api_distribution_proto_rawDescData -} - -var file_google_api_distribution_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_api_distribution_proto_goTypes = []interface{}{ - (*Distribution)(nil), // 0: google.api.Distribution - (*Distribution_Range)(nil), // 1: google.api.Distribution.Range - (*Distribution_BucketOptions)(nil), // 2: google.api.Distribution.BucketOptions - (*Distribution_Exemplar)(nil), // 3: google.api.Distribution.Exemplar - (*Distribution_BucketOptions_Linear)(nil), // 4: google.api.Distribution.BucketOptions.Linear - (*Distribution_BucketOptions_Exponential)(nil), // 5: google.api.Distribution.BucketOptions.Exponential - (*Distribution_BucketOptions_Explicit)(nil), // 6: google.api.Distribution.BucketOptions.Explicit - (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp - (*anypb.Any)(nil), // 8: google.protobuf.Any -} -var file_google_api_distribution_proto_depIdxs = []int32{ - 1, // 0: google.api.Distribution.range:type_name -> google.api.Distribution.Range - 2, // 1: google.api.Distribution.bucket_options:type_name -> google.api.Distribution.BucketOptions - 3, // 2: google.api.Distribution.exemplars:type_name -> google.api.Distribution.Exemplar - 4, // 3: google.api.Distribution.BucketOptions.linear_buckets:type_name -> google.api.Distribution.BucketOptions.Linear - 5, // 4: google.api.Distribution.BucketOptions.exponential_buckets:type_name -> google.api.Distribution.BucketOptions.Exponential - 6, // 5: google.api.Distribution.BucketOptions.explicit_buckets:type_name -> 
google.api.Distribution.BucketOptions.Explicit - 7, // 6: google.api.Distribution.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 8, // 7: google.api.Distribution.Exemplar.attachments:type_name -> google.protobuf.Any - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name -} - -func init() { file_google_api_distribution_proto_init() } -func file_google_api_distribution_proto_init() { - if File_google_api_distribution_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_distribution_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_distribution_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution_Range); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_distribution_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution_BucketOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_distribution_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution_Exemplar); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_distribution_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution_BucketOptions_Linear); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_distribution_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution_BucketOptions_Exponential); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_distribution_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Distribution_BucketOptions_Explicit); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_distribution_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*Distribution_BucketOptions_LinearBuckets)(nil), - (*Distribution_BucketOptions_ExponentialBuckets)(nil), - (*Distribution_BucketOptions_ExplicitBuckets)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_distribution_proto_rawDesc, - NumEnums: 0, - NumMessages: 7, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_distribution_proto_goTypes, - DependencyIndexes: file_google_api_distribution_proto_depIdxs, - MessageInfos: file_google_api_distribution_proto_msgTypes, - }.Build() - File_google_api_distribution_proto = out.File - file_google_api_distribution_proto_rawDesc = nil - file_google_api_distribution_proto_goTypes = nil - 
file_google_api_distribution_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go deleted file mode 100644 index 74f09b71b0..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go +++ /dev/null @@ -1,1646 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/expr/v1alpha1/checked.proto - -package expr - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// CEL primitive types. -type Type_PrimitiveType int32 - -const ( - // Unspecified type. - Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 - // Boolean type. - Type_BOOL Type_PrimitiveType = 1 - // Int64 type. - // - // Proto-based integer values are widened to int64. - Type_INT64 Type_PrimitiveType = 2 - // Uint64 type. - // - // Proto-based unsigned integer values are widened to uint64. - Type_UINT64 Type_PrimitiveType = 3 - // Double type. - // - // Proto-based float values are widened to double values. - Type_DOUBLE Type_PrimitiveType = 4 - // String type. - Type_STRING Type_PrimitiveType = 5 - // Bytes type. - Type_BYTES Type_PrimitiveType = 6 -) - -// Enum value maps for Type_PrimitiveType. 
-var ( - Type_PrimitiveType_name = map[int32]string{ - 0: "PRIMITIVE_TYPE_UNSPECIFIED", - 1: "BOOL", - 2: "INT64", - 3: "UINT64", - 4: "DOUBLE", - 5: "STRING", - 6: "BYTES", - } - Type_PrimitiveType_value = map[string]int32{ - "PRIMITIVE_TYPE_UNSPECIFIED": 0, - "BOOL": 1, - "INT64": 2, - "UINT64": 3, - "DOUBLE": 4, - "STRING": 5, - "BYTES": 6, - } -) - -func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { - p := new(Type_PrimitiveType) - *p = x - return p -} - -func (x Type_PrimitiveType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_expr_v1alpha1_checked_proto_enumTypes[0].Descriptor() -} - -func (Type_PrimitiveType) Type() protoreflect.EnumType { - return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[0] -} - -func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Type_PrimitiveType.Descriptor instead. -func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0} -} - -// Well-known protobuf types treated with first-class support in CEL. -type Type_WellKnownType int32 - -const ( - // Unspecified type. - Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 - // Well-known protobuf.Any type. - // - // Any types are a polymorphic message type. During type-checking they are - // treated like `DYN` types, but at runtime they are resolved to a specific - // message type specified at evaluation time. - Type_ANY Type_WellKnownType = 1 - // Well-known protobuf.Timestamp type, internally referenced as `timestamp`. - Type_TIMESTAMP Type_WellKnownType = 2 - // Well-known protobuf.Duration type, internally referenced as `duration`. - Type_DURATION Type_WellKnownType = 3 -) - -// Enum value maps for Type_WellKnownType. -var ( - Type_WellKnownType_name = map[int32]string{ - 0: "WELL_KNOWN_TYPE_UNSPECIFIED", - 1: "ANY", - 2: "TIMESTAMP", - 3: "DURATION", - } - Type_WellKnownType_value = map[string]int32{ - "WELL_KNOWN_TYPE_UNSPECIFIED": 0, - "ANY": 1, - "TIMESTAMP": 2, - "DURATION": 3, - } -) - -func (x Type_WellKnownType) Enum() *Type_WellKnownType { - p := new(Type_WellKnownType) - *p = x - return p -} - -func (x Type_WellKnownType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_expr_v1alpha1_checked_proto_enumTypes[1].Descriptor() -} - -func (Type_WellKnownType) Type() protoreflect.EnumType { - return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[1] -} - -func (x Type_WellKnownType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Type_WellKnownType.Descriptor instead. -func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1} -} - -// A CEL expression which has been successfully type checked. -type CheckedExpr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A map from expression ids to resolved references. - // - // The following entries are in this table: - // - // - An Ident or Select expression is represented here if it resolves to a - // declaration. 
For instance, if `a.b.c` is represented by - // `select(select(id(a), b), c)`, and `a.b` resolves to a declaration, - // while `c` is a field selection, then the reference is attached to the - // nested select expression (but not to the id or or the outer select). - // In turn, if `a` resolves to a declaration and `b.c` are field selections, - // the reference is attached to the ident expression. - // - Every Call expression has an entry here, identifying the function being - // called. - // - Every CreateStruct expression for a message has an entry, identifying - // the message. - ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // A map from expression ids to types. - // - // Every expression node which has a type different than DYN has a mapping - // here. If an expression has type DYN, it is omitted from this map to save - // space. - TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The source info derived from input that generated the parsed `expr` and - // any optimizations made during the type-checking pass. - SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` - // The checked expression. Semantically equivalent to the parsed `expr`, but - // may have structural differences. - Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` -} - -func (x *CheckedExpr) Reset() { - *x = CheckedExpr{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CheckedExpr) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CheckedExpr) ProtoMessage() {} - -func (x *CheckedExpr) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. -func (*CheckedExpr) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{0} -} - -func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { - if x != nil { - return x.ReferenceMap - } - return nil -} - -func (x *CheckedExpr) GetTypeMap() map[int64]*Type { - if x != nil { - return x.TypeMap - } - return nil -} - -func (x *CheckedExpr) GetSourceInfo() *SourceInfo { - if x != nil { - return x.SourceInfo - } - return nil -} - -func (x *CheckedExpr) GetExpr() *Expr { - if x != nil { - return x.Expr - } - return nil -} - -// Represents a CEL type. -type Type struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The kind of type. 
- // - // Types that are assignable to TypeKind: - // *Type_Dyn - // *Type_Null - // *Type_Primitive - // *Type_Wrapper - // *Type_WellKnown - // *Type_ListType_ - // *Type_MapType_ - // *Type_Function - // *Type_MessageType - // *Type_TypeParam - // *Type_Type - // *Type_Error - // *Type_AbstractType_ - TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` -} - -func (x *Type) Reset() { - *x = Type{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type) ProtoMessage() {} - -func (x *Type) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type.ProtoReflect.Descriptor instead. -func (*Type) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1} -} - -func (m *Type) GetTypeKind() isType_TypeKind { - if m != nil { - return m.TypeKind - } - return nil -} - -func (x *Type) GetDyn() *emptypb.Empty { - if x, ok := x.GetTypeKind().(*Type_Dyn); ok { - return x.Dyn - } - return nil -} - -func (x *Type) GetNull() structpb.NullValue { - if x, ok := x.GetTypeKind().(*Type_Null); ok { - return x.Null - } - return structpb.NullValue_NULL_VALUE -} - -func (x *Type) GetPrimitive() Type_PrimitiveType { - if x, ok := x.GetTypeKind().(*Type_Primitive); ok { - return x.Primitive - } - return Type_PRIMITIVE_TYPE_UNSPECIFIED -} - -func (x *Type) GetWrapper() Type_PrimitiveType { - if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { - return x.Wrapper - } - return Type_PRIMITIVE_TYPE_UNSPECIFIED -} - -func (x *Type) GetWellKnown() Type_WellKnownType { - if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { - return x.WellKnown - } - return Type_WELL_KNOWN_TYPE_UNSPECIFIED -} - -func (x *Type) GetListType() *Type_ListType { - if x, ok := x.GetTypeKind().(*Type_ListType_); ok { - return x.ListType - } - return nil -} - -func (x *Type) GetMapType() *Type_MapType { - if x, ok := x.GetTypeKind().(*Type_MapType_); ok { - return x.MapType - } - return nil -} - -func (x *Type) GetFunction() *Type_FunctionType { - if x, ok := x.GetTypeKind().(*Type_Function); ok { - return x.Function - } - return nil -} - -func (x *Type) GetMessageType() string { - if x, ok := x.GetTypeKind().(*Type_MessageType); ok { - return x.MessageType - } - return "" -} - -func (x *Type) GetTypeParam() string { - if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { - return x.TypeParam - } - return "" -} - -func (x *Type) GetType() *Type { - if x, ok := x.GetTypeKind().(*Type_Type); ok { - return x.Type - } - return nil -} - -func (x *Type) GetError() *emptypb.Empty { - if x, ok := x.GetTypeKind().(*Type_Error); ok { - return x.Error - } - return nil -} - -func (x *Type) GetAbstractType() *Type_AbstractType { - if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { - return x.AbstractType - } - return nil -} - -type isType_TypeKind interface { - isType_TypeKind() -} - -type Type_Dyn struct { - // Dynamic type. - Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` -} - -type Type_Null struct { - // Null value. 
- Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Type_Primitive struct { - // Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`. - Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"` -} - -type Type_Wrapper struct { - // Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`. - Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"` -} - -type Type_WellKnown struct { - // Well-known protobuf type such as `google.protobuf.Timestamp`. - WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=google.api.expr.v1alpha1.Type_WellKnownType,oneof"` -} - -type Type_ListType_ struct { - // Parameterized list with elements of `list_type`, e.g. `list`. - ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` -} - -type Type_MapType_ struct { - // Parameterized map with typed keys and values. - MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` -} - -type Type_Function struct { - // Function type. - Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` -} - -type Type_MessageType struct { - // Protocol buffer message type. - // - // The `message_type` string specifies the qualified message type name. For - // example, `google.plus.Profile`. - MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` -} - -type Type_TypeParam struct { - // Type param type. - // - // The `type_param` string specifies the type parameter name, e.g. `list` - // would be a `list_type` whose element type was a `type_param` type - // named `E`. - TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` -} - -type Type_Type struct { - // Type type. - // - // The `type` value specifies the target type. e.g. int is type with a - // target type of `Primitive.INT`. - Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` -} - -type Type_Error struct { - // Error type. - // - // During type-checking if an expression is an error, its type is propagated - // as the `ERROR` type. This permits the type-checker to discover other - // errors present in the expression. - Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` -} - -type Type_AbstractType_ struct { - // Abstract, application defined type. - AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` -} - -func (*Type_Dyn) isType_TypeKind() {} - -func (*Type_Null) isType_TypeKind() {} - -func (*Type_Primitive) isType_TypeKind() {} - -func (*Type_Wrapper) isType_TypeKind() {} - -func (*Type_WellKnown) isType_TypeKind() {} - -func (*Type_ListType_) isType_TypeKind() {} - -func (*Type_MapType_) isType_TypeKind() {} - -func (*Type_Function) isType_TypeKind() {} - -func (*Type_MessageType) isType_TypeKind() {} - -func (*Type_TypeParam) isType_TypeKind() {} - -func (*Type_Type) isType_TypeKind() {} - -func (*Type_Error) isType_TypeKind() {} - -func (*Type_AbstractType_) isType_TypeKind() {} - -// Represents a declaration of a named value or function. -// -// A declaration is part of the contract between the expression, the agent -// evaluating that expression, and the caller requesting evaluation. 
-type Decl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fully qualified name of the declaration. - // - // Declarations are organized in containers and this represents the full path - // to the declaration in its container, as in `google.api.expr.Decl`. - // - // Declarations used as [FunctionDecl.Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload] parameters may or may not - // have a name depending on whether the overload is function declaration or a - // function definition containing a result [Expr][google.api.expr.v1alpha1.Expr]. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The declaration kind. - // - // Types that are assignable to DeclKind: - // *Decl_Ident - // *Decl_Function - DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` -} - -func (x *Decl) Reset() { - *x = Decl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Decl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Decl) ProtoMessage() {} - -func (x *Decl) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Decl.ProtoReflect.Descriptor instead. -func (*Decl) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2} -} - -func (x *Decl) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (m *Decl) GetDeclKind() isDecl_DeclKind { - if m != nil { - return m.DeclKind - } - return nil -} - -func (x *Decl) GetIdent() *Decl_IdentDecl { - if x, ok := x.GetDeclKind().(*Decl_Ident); ok { - return x.Ident - } - return nil -} - -func (x *Decl) GetFunction() *Decl_FunctionDecl { - if x, ok := x.GetDeclKind().(*Decl_Function); ok { - return x.Function - } - return nil -} - -type isDecl_DeclKind interface { - isDecl_DeclKind() -} - -type Decl_Ident struct { - // Identifier declaration. - Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` -} - -type Decl_Function struct { - // Function declaration. - Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` -} - -func (*Decl_Ident) isDecl_DeclKind() {} - -func (*Decl_Function) isDecl_DeclKind() {} - -// Describes a resolved reference to a declaration. -type Reference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fully qualified name of the declaration. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // For references to functions, this is a list of `Overload.overload_id` - // values which match according to typing rules. - // - // If the list has more than one element, overload resolution among the - // presented candidates must happen at runtime because of dynamic types. The - // type checker attempts to narrow down this list as much as possible. - // - // Empty if this is not a reference to a [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl]. 
- OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` - // For references to constants, this may contain the value of the - // constant if known at compile time. - Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Reference) Reset() { - *x = Reference{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Reference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Reference) ProtoMessage() {} - -func (x *Reference) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Reference.ProtoReflect.Descriptor instead. -func (*Reference) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3} -} - -func (x *Reference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Reference) GetOverloadId() []string { - if x != nil { - return x.OverloadId - } - return nil -} - -func (x *Reference) GetValue() *Constant { - if x != nil { - return x.Value - } - return nil -} - -// List type with typed elements, e.g. `list`. -type Type_ListType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The element type. - ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` -} - -func (x *Type_ListType) Reset() { - *x = Type_ListType{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_ListType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_ListType) ProtoMessage() {} - -func (x *Type_ListType) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. -func (*Type_ListType) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0} -} - -func (x *Type_ListType) GetElemType() *Type { - if x != nil { - return x.ElemType - } - return nil -} - -// Map type with parameterized key and value types, e.g. `map`. -type Type_MapType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of the key. - KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` - // The type of the value. 
- ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` -} - -func (x *Type_MapType) Reset() { - *x = Type_MapType{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_MapType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_MapType) ProtoMessage() {} - -func (x *Type_MapType) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. -func (*Type_MapType) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1} -} - -func (x *Type_MapType) GetKeyType() *Type { - if x != nil { - return x.KeyType - } - return nil -} - -func (x *Type_MapType) GetValueType() *Type { - if x != nil { - return x.ValueType - } - return nil -} - -// Function type with result and arg types. -type Type_FunctionType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Result type of the function. - ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` - // Argument types of the function. - ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` -} - -func (x *Type_FunctionType) Reset() { - *x = Type_FunctionType{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_FunctionType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_FunctionType) ProtoMessage() {} - -func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. -func (*Type_FunctionType) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 2} -} - -func (x *Type_FunctionType) GetResultType() *Type { - if x != nil { - return x.ResultType - } - return nil -} - -func (x *Type_FunctionType) GetArgTypes() []*Type { - if x != nil { - return x.ArgTypes - } - return nil -} - -// Application defined abstract type. -type Type_AbstractType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fully qualified name of this abstract type. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Parameter types for this abstract type. 
- ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` -} - -func (x *Type_AbstractType) Reset() { - *x = Type_AbstractType{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Type_AbstractType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type_AbstractType) ProtoMessage() {} - -func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. -func (*Type_AbstractType) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 3} -} - -func (x *Type_AbstractType) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Type_AbstractType) GetParameterTypes() []*Type { - if x != nil { - return x.ParameterTypes - } - return nil -} - -// Identifier declaration which specifies its type and optional `Expr` value. -// -// An identifier without a value is a declaration that must be provided at -// evaluation time. An identifier with a value should resolve to a constant, -// but may be used in conjunction with other identifiers bound at evaluation -// time. -type Decl_IdentDecl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The type of the identifier. - Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The constant value of the identifier. If not specified, the identifier - // must be supplied at evaluation time. - Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // Documentation string for the identifier. - Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` -} - -func (x *Decl_IdentDecl) Reset() { - *x = Decl_IdentDecl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Decl_IdentDecl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Decl_IdentDecl) ProtoMessage() {} - -func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
-func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *Decl_IdentDecl) GetType() *Type { - if x != nil { - return x.Type - } - return nil -} - -func (x *Decl_IdentDecl) GetValue() *Constant { - if x != nil { - return x.Value - } - return nil -} - -func (x *Decl_IdentDecl) GetDoc() string { - if x != nil { - return x.Doc - } - return "" -} - -// Function declaration specifies one or more overloads which indicate the -// function's parameter types and return type, and may optionally specify a -// function definition in terms of CEL expressions. -// -// Functions have no observable side-effects (there may be side-effects like -// logging which are not observable from CEL). -type Decl_FunctionDecl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. List of function overloads, must contain at least one overload. - Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` -} - -func (x *Decl_FunctionDecl) Reset() { - *x = Decl_FunctionDecl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Decl_FunctionDecl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Decl_FunctionDecl) ProtoMessage() {} - -func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. -func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1} -} - -func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { - if x != nil { - return x.Overloads - } - return nil -} - -// An overload indicates a function's parameter types and return type, and -// may optionally include a function body described in terms of [Expr][google.api.expr.v1alpha1.Expr] -// values. -// -// Functions overloads are declared in either a function or method -// call-style. For methods, the `params[0]` is the expected type of the -// target receiver. -// -// Overloads must have non-overlapping argument types after erasure of all -// parameterized type variables (similar as type erasure in Java). -type Decl_FunctionDecl_Overload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Globally unique overload name of the function which reflects - // the function name and argument types. - // - // This will be used by a [Reference][google.api.expr.v1alpha1.Reference] to indicate the `overload_id` that - // was resolved for the function `name`. - OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` - // List of function parameter [Type][google.api.expr.v1alpha1.Type] values. - // - // Param types are disjoint after generic type parameters have been - // replaced with the type `DYN`. 
Since the `DYN` type is compatible with - // any other type, this means that if `A` is a type parameter, the - // function types `int` and `int` are not disjoint. Likewise, - // `map` is not disjoint from `map`. - // - // When the `result_type` of a function is a generic type param, the - // type param name also appears as the `type` of on at least one params. - Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` - // The type param names associated with the function declaration. - // - // For example, `function ex(K key, map map) : V` would yield - // the type params of `K, V`. - TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` - // Required. The result type of the function. For example, the operator - // `string.isEmpty()` would have `result_type` of `kind: BOOL`. - ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` - // Whether the function is to be used in a method call-style `x.f(...)` - // of a function call-style `f(x, ...)`. - // - // For methods, the first parameter declaration, `params[0]` is the - // expected type of the target receiver. - IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` - // Documentation string for the overload. - Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` -} - -func (x *Decl_FunctionDecl_Overload) Reset() { - *x = Decl_FunctionDecl_Overload{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Decl_FunctionDecl_Overload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Decl_FunctionDecl_Overload) ProtoMessage() {} - -func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
-func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1, 0} -} - -func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { - if x != nil { - return x.OverloadId - } - return "" -} - -func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { - if x != nil { - return x.Params - } - return nil -} - -func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { - if x != nil { - return x.TypeParams - } - return nil -} - -func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { - if x != nil { - return x.ResultType - } - return nil -} - -func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { - if x != nil { - return x.IsInstanceFunction - } - return false -} - -func (x *Decl_FunctionDecl_Overload) GetDoc() string { - if x != nil { - return x.Doc - } - return "" -} - -var File_google_api_expr_v1alpha1_checked_proto protoreflect.FileDescriptor - -var file_google_api_expr_v1alpha1_checked_proto_rawDesc = []byte{ - 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, - 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, - 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, - 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf7, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, - 0x45, 0x78, 0x70, 0x72, 0x12, 0x5c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, - 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, - 0x61, 0x70, 0x12, 0x4d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, - 0x70, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 
0x6f, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, 0x64, 0x0a, 0x11, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x5a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, - 0x0b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x03, - 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, - 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x4c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4d, 0x0a, - 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 
0x48, - 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x46, 0x0a, 0x09, - 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, - 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x52, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x1a, 0x47, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x3b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x83, 0x01, - 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x5f, 0x74, 0x79, 
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, 0x65, 0x79, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x1a, 0x8c, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x1a, 0x6b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, - 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, - 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, 0x0a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, 0x0c, - 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, 0x09, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 
0x6e, 0x64, 0x22, 0xb3, 0x05, 0x0a, 0x04, 0x44, 0x65, - 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, 0x48, - 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x8b, 0x01, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, - 0x6c, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, - 0x63, 0x1a, 0xee, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x63, 0x6c, 0x12, 0x52, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, - 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0x89, 0x02, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c, - 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, - 0x61, 0x64, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3f, 0x0a, - 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, - 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, - 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, - 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x7a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, - 0x64, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6c, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x44, 0x65, 0x63, - 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_api_expr_v1alpha1_checked_proto_rawDescOnce sync.Once - file_google_api_expr_v1alpha1_checked_proto_rawDescData = file_google_api_expr_v1alpha1_checked_proto_rawDesc -) - -func file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP() []byte { - file_google_api_expr_v1alpha1_checked_proto_rawDescOnce.Do(func() { - file_google_api_expr_v1alpha1_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_checked_proto_rawDescData) - }) - return file_google_api_expr_v1alpha1_checked_proto_rawDescData -} - -var file_google_api_expr_v1alpha1_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_expr_v1alpha1_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_google_api_expr_v1alpha1_checked_proto_goTypes = []interface{}{ - (Type_PrimitiveType)(0), // 0: google.api.expr.v1alpha1.Type.PrimitiveType - (Type_WellKnownType)(0), // 1: google.api.expr.v1alpha1.Type.WellKnownType - (*CheckedExpr)(nil), // 2: google.api.expr.v1alpha1.CheckedExpr - (*Type)(nil), // 3: google.api.expr.v1alpha1.Type - (*Decl)(nil), // 4: google.api.expr.v1alpha1.Decl - (*Reference)(nil), // 5: google.api.expr.v1alpha1.Reference - nil, // 6: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry - nil, // 7: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry - (*Type_ListType)(nil), // 
8: google.api.expr.v1alpha1.Type.ListType - (*Type_MapType)(nil), // 9: google.api.expr.v1alpha1.Type.MapType - (*Type_FunctionType)(nil), // 10: google.api.expr.v1alpha1.Type.FunctionType - (*Type_AbstractType)(nil), // 11: google.api.expr.v1alpha1.Type.AbstractType - (*Decl_IdentDecl)(nil), // 12: google.api.expr.v1alpha1.Decl.IdentDecl - (*Decl_FunctionDecl)(nil), // 13: google.api.expr.v1alpha1.Decl.FunctionDecl - (*Decl_FunctionDecl_Overload)(nil), // 14: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload - (*SourceInfo)(nil), // 15: google.api.expr.v1alpha1.SourceInfo - (*Expr)(nil), // 16: google.api.expr.v1alpha1.Expr - (*emptypb.Empty)(nil), // 17: google.protobuf.Empty - (structpb.NullValue)(0), // 18: google.protobuf.NullValue - (*Constant)(nil), // 19: google.api.expr.v1alpha1.Constant -} -var file_google_api_expr_v1alpha1_checked_proto_depIdxs = []int32{ - 6, // 0: google.api.expr.v1alpha1.CheckedExpr.reference_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry - 7, // 1: google.api.expr.v1alpha1.CheckedExpr.type_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry - 15, // 2: google.api.expr.v1alpha1.CheckedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo - 16, // 3: google.api.expr.v1alpha1.CheckedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr - 17, // 4: google.api.expr.v1alpha1.Type.dyn:type_name -> google.protobuf.Empty - 18, // 5: google.api.expr.v1alpha1.Type.null:type_name -> google.protobuf.NullValue - 0, // 6: google.api.expr.v1alpha1.Type.primitive:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType - 0, // 7: google.api.expr.v1alpha1.Type.wrapper:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType - 1, // 8: google.api.expr.v1alpha1.Type.well_known:type_name -> google.api.expr.v1alpha1.Type.WellKnownType - 8, // 9: google.api.expr.v1alpha1.Type.list_type:type_name -> google.api.expr.v1alpha1.Type.ListType - 9, // 10: google.api.expr.v1alpha1.Type.map_type:type_name -> google.api.expr.v1alpha1.Type.MapType - 10, // 11: google.api.expr.v1alpha1.Type.function:type_name -> google.api.expr.v1alpha1.Type.FunctionType - 3, // 12: google.api.expr.v1alpha1.Type.type:type_name -> google.api.expr.v1alpha1.Type - 17, // 13: google.api.expr.v1alpha1.Type.error:type_name -> google.protobuf.Empty - 11, // 14: google.api.expr.v1alpha1.Type.abstract_type:type_name -> google.api.expr.v1alpha1.Type.AbstractType - 12, // 15: google.api.expr.v1alpha1.Decl.ident:type_name -> google.api.expr.v1alpha1.Decl.IdentDecl - 13, // 16: google.api.expr.v1alpha1.Decl.function:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl - 19, // 17: google.api.expr.v1alpha1.Reference.value:type_name -> google.api.expr.v1alpha1.Constant - 5, // 18: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry.value:type_name -> google.api.expr.v1alpha1.Reference - 3, // 19: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry.value:type_name -> google.api.expr.v1alpha1.Type - 3, // 20: google.api.expr.v1alpha1.Type.ListType.elem_type:type_name -> google.api.expr.v1alpha1.Type - 3, // 21: google.api.expr.v1alpha1.Type.MapType.key_type:type_name -> google.api.expr.v1alpha1.Type - 3, // 22: google.api.expr.v1alpha1.Type.MapType.value_type:type_name -> google.api.expr.v1alpha1.Type - 3, // 23: google.api.expr.v1alpha1.Type.FunctionType.result_type:type_name -> google.api.expr.v1alpha1.Type - 3, // 24: google.api.expr.v1alpha1.Type.FunctionType.arg_types:type_name -> google.api.expr.v1alpha1.Type - 3, // 25: 
google.api.expr.v1alpha1.Type.AbstractType.parameter_types:type_name -> google.api.expr.v1alpha1.Type - 3, // 26: google.api.expr.v1alpha1.Decl.IdentDecl.type:type_name -> google.api.expr.v1alpha1.Type - 19, // 27: google.api.expr.v1alpha1.Decl.IdentDecl.value:type_name -> google.api.expr.v1alpha1.Constant - 14, // 28: google.api.expr.v1alpha1.Decl.FunctionDecl.overloads:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl.Overload - 3, // 29: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.params:type_name -> google.api.expr.v1alpha1.Type - 3, // 30: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.result_type:type_name -> google.api.expr.v1alpha1.Type - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 31, // [31:31] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name -} - -func init() { file_google_api_expr_v1alpha1_checked_proto_init() } -func file_google_api_expr_v1alpha1_checked_proto_init() { - if File_google_api_expr_v1alpha1_checked_proto != nil { - return - } - file_google_api_expr_v1alpha1_syntax_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_api_expr_v1alpha1_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckedExpr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Reference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_ListType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_MapType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_FunctionType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_AbstractType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*Decl_IdentDecl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_FunctionDecl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_FunctionDecl_Overload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Type_Dyn)(nil), - (*Type_Null)(nil), - (*Type_Primitive)(nil), - (*Type_Wrapper)(nil), - (*Type_WellKnown)(nil), - (*Type_ListType_)(nil), - (*Type_MapType_)(nil), - (*Type_Function)(nil), - (*Type_MessageType)(nil), - (*Type_TypeParam)(nil), - (*Type_Type)(nil), - (*Type_Error)(nil), - (*Type_AbstractType_)(nil), - } - file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*Decl_Ident)(nil), - (*Decl_Function)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_expr_v1alpha1_checked_proto_rawDesc, - NumEnums: 2, - NumMessages: 13, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_expr_v1alpha1_checked_proto_goTypes, - DependencyIndexes: file_google_api_expr_v1alpha1_checked_proto_depIdxs, - EnumInfos: file_google_api_expr_v1alpha1_checked_proto_enumTypes, - MessageInfos: file_google_api_expr_v1alpha1_checked_proto_msgTypes, - }.Build() - File_google_api_expr_v1alpha1_checked_proto = out.File - file_google_api_expr_v1alpha1_checked_proto_rawDesc = nil - file_google_api_expr_v1alpha1_checked_proto_goTypes = nil - file_google_api_expr_v1alpha1_checked_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go deleted file mode 100644 index 73aa1947d2..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go +++ /dev/null @@ -1,584 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/expr/v1alpha1/eval.proto - -package expr - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - status "google.golang.org/genproto/googleapis/rpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The state of an evaluation. -// -// Can represent an inital, partial, or completed state of evaluation. -type EvalState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The unique values referenced in this message. - Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - // An ordered list of results. - // - // Tracks the flow of evaluation through the expression. - // May be sparse. - Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` -} - -func (x *EvalState) Reset() { - *x = EvalState{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EvalState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EvalState) ProtoMessage() {} - -func (x *EvalState) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EvalState.ProtoReflect.Descriptor instead. -func (*EvalState) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0} -} - -func (x *EvalState) GetValues() []*ExprValue { - if x != nil { - return x.Values - } - return nil -} - -func (x *EvalState) GetResults() []*EvalState_Result { - if x != nil { - return x.Results - } - return nil -} - -// The value of an evaluated expression. -type ExprValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An expression can resolve to a value, error or unknown. - // - // Types that are assignable to Kind: - // *ExprValue_Value - // *ExprValue_Error - // *ExprValue_Unknown - Kind isExprValue_Kind `protobuf_oneof:"kind"` -} - -func (x *ExprValue) Reset() { - *x = ExprValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExprValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExprValue) ProtoMessage() {} - -func (x *ExprValue) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. 
-func (*ExprValue) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{1} -} - -func (m *ExprValue) GetKind() isExprValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (x *ExprValue) GetValue() *Value { - if x, ok := x.GetKind().(*ExprValue_Value); ok { - return x.Value - } - return nil -} - -func (x *ExprValue) GetError() *ErrorSet { - if x, ok := x.GetKind().(*ExprValue_Error); ok { - return x.Error - } - return nil -} - -func (x *ExprValue) GetUnknown() *UnknownSet { - if x, ok := x.GetKind().(*ExprValue_Unknown); ok { - return x.Unknown - } - return nil -} - -type isExprValue_Kind interface { - isExprValue_Kind() -} - -type ExprValue_Value struct { - // A concrete value. - Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` -} - -type ExprValue_Error struct { - // The set of errors in the critical path of evalution. - // - // Only errors in the critical path are included. For example, - // `( || true) && ` will only result in ``, - // while ` || ` will result in both `` and - // ``. - // - // Errors cause by the presence of other errors are not included in the - // set. For example `.foo`, `foo()`, and ` + 1` will - // only result in ``. - // - // Multiple errors *might* be included when evaluation could result - // in different errors. For example ` + ` and - // `foo(, )` may result in ``, `` or both. - // The exact subset of errors included for this case is unspecified and - // depends on the implementation details of the evaluator. - Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` -} - -type ExprValue_Unknown struct { - // The set of unknowns in the critical path of evaluation. - // - // Unknown behaves identically to Error with regards to propagation. - // Specifically, only unknowns in the critical path are included, unknowns - // caused by the presence of other unknowns are not included, and multiple - // unknowns *might* be included included when evaluation could result in - // different unknowns. For example: - // - // ( || true) && -> - // || -> - // .foo -> - // foo() -> - // + -> or - // - // Unknown takes precidence over Error in cases where a `Value` can short - // circuit the result: - // - // || -> - // && -> - // - // Errors take precidence in all other cases: - // - // + -> - // foo(, ) -> - Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` -} - -func (*ExprValue_Value) isExprValue_Kind() {} - -func (*ExprValue_Error) isExprValue_Kind() {} - -func (*ExprValue_Unknown) isExprValue_Kind() {} - -// A set of errors. -// -// The errors included depend on the context. See `ExprValue.error`. -type ErrorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The errors in the set. 
- Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` -} - -func (x *ErrorSet) Reset() { - *x = ErrorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorSet) ProtoMessage() {} - -func (x *ErrorSet) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. -func (*ErrorSet) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{2} -} - -func (x *ErrorSet) GetErrors() []*status.Status { - if x != nil { - return x.Errors - } - return nil -} - -// A set of expressions for which the value is unknown. -// -// The unknowns included depend on the context. See `ExprValue.unknown`. -type UnknownSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ids of the expressions with unknown values. - Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` -} - -func (x *UnknownSet) Reset() { - *x = UnknownSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UnknownSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UnknownSet) ProtoMessage() {} - -func (x *UnknownSet) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. -func (*UnknownSet) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{3} -} - -func (x *UnknownSet) GetExprs() []int64 { - if x != nil { - return x.Exprs - } - return nil -} - -// A single evalution result. -type EvalState_Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The id of the expression this result if for. - Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` - // The index in `values` of the resulting value. 
- Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *EvalState_Result) Reset() { - *x = EvalState_Result{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EvalState_Result) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EvalState_Result) ProtoMessage() {} - -func (x *EvalState_Result) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. -func (*EvalState_Result) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *EvalState_Result) GetExpr() int64 { - if x != nil { - return x.Expr - } - return 0 -} - -func (x *EvalState_Result) GetValue() int64 { - if x != nil { - return x.Value - } - return 0 -} - -var File_google_api_expr_v1alpha1_eval_proto protoreflect.FileDescriptor - -var file_google_api_expr_v1alpha1_eval_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, - 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, - 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2, - 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, - 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0xca, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x6c, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x45, 0x76, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_api_expr_v1alpha1_eval_proto_rawDescOnce sync.Once - file_google_api_expr_v1alpha1_eval_proto_rawDescData = file_google_api_expr_v1alpha1_eval_proto_rawDesc -) - -func file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP() []byte { - file_google_api_expr_v1alpha1_eval_proto_rawDescOnce.Do(func() { - file_google_api_expr_v1alpha1_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_eval_proto_rawDescData) - }) - return file_google_api_expr_v1alpha1_eval_proto_rawDescData -} - -var file_google_api_expr_v1alpha1_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_api_expr_v1alpha1_eval_proto_goTypes = []interface{}{ - (*EvalState)(nil), // 0: google.api.expr.v1alpha1.EvalState - (*ExprValue)(nil), // 1: google.api.expr.v1alpha1.ExprValue - (*ErrorSet)(nil), // 2: google.api.expr.v1alpha1.ErrorSet - (*UnknownSet)(nil), // 3: google.api.expr.v1alpha1.UnknownSet - (*EvalState_Result)(nil), // 4: google.api.expr.v1alpha1.EvalState.Result - (*Value)(nil), // 5: google.api.expr.v1alpha1.Value - (*status.Status)(nil), // 6: google.rpc.Status -} -var file_google_api_expr_v1alpha1_eval_proto_depIdxs = []int32{ - 1, // 0: 
google.api.expr.v1alpha1.EvalState.values:type_name -> google.api.expr.v1alpha1.ExprValue - 4, // 1: google.api.expr.v1alpha1.EvalState.results:type_name -> google.api.expr.v1alpha1.EvalState.Result - 5, // 2: google.api.expr.v1alpha1.ExprValue.value:type_name -> google.api.expr.v1alpha1.Value - 2, // 3: google.api.expr.v1alpha1.ExprValue.error:type_name -> google.api.expr.v1alpha1.ErrorSet - 3, // 4: google.api.expr.v1alpha1.ExprValue.unknown:type_name -> google.api.expr.v1alpha1.UnknownSet - 6, // 5: google.api.expr.v1alpha1.ErrorSet.errors:type_name -> google.rpc.Status - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_google_api_expr_v1alpha1_eval_proto_init() } -func file_google_api_expr_v1alpha1_eval_proto_init() { - if File_google_api_expr_v1alpha1_eval_proto != nil { - return - } - file_google_api_expr_v1alpha1_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_api_expr_v1alpha1_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExprValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UnknownSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState_Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*ExprValue_Value)(nil), - (*ExprValue_Error)(nil), - (*ExprValue_Unknown)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_expr_v1alpha1_eval_proto_rawDesc, - NumEnums: 0, - NumMessages: 5, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_expr_v1alpha1_eval_proto_goTypes, - DependencyIndexes: file_google_api_expr_v1alpha1_eval_proto_depIdxs, - MessageInfos: file_google_api_expr_v1alpha1_eval_proto_msgTypes, - }.Build() - File_google_api_expr_v1alpha1_eval_proto = out.File - file_google_api_expr_v1alpha1_eval_proto_rawDesc = nil - file_google_api_expr_v1alpha1_eval_proto_goTypes = nil - file_google_api_expr_v1alpha1_eval_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go deleted file mode 100644 
index 853b373705..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/expr/v1alpha1/explain.proto - -package expr - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Values of intermediate expressions produced when evaluating expression. -// Deprecated, use `EvalState` instead. -// -// Deprecated: Do not use. -type Explain struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // All of the observed values. - // - // The field value_index is an index in the values list. - // Separating values from steps is needed to remove redundant values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - // List of steps. - // - // Repeated evaluations of the same expression generate new ExprStep - // instances. The order of such ExprStep instances matches the order of - // elements returned by Comprehension.iter_range. - ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` -} - -func (x *Explain) Reset() { - *x = Explain{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Explain) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Explain) ProtoMessage() {} - -func (x *Explain) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Explain.ProtoReflect.Descriptor instead. 
-func (*Explain) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0} -} - -func (x *Explain) GetValues() []*Value { - if x != nil { - return x.Values - } - return nil -} - -func (x *Explain) GetExprSteps() []*Explain_ExprStep { - if x != nil { - return x.ExprSteps - } - return nil -} - -// ID and value index of one step. -type Explain_ExprStep struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID of corresponding Expr node. - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Index of the value in the values list. - ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` -} - -func (x *Explain_ExprStep) Reset() { - *x = Explain_ExprStep{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Explain_ExprStep) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Explain_ExprStep) ProtoMessage() {} - -func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. -func (*Explain_ExprStep) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Explain_ExprStep) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Explain_ExprStep) GetValueIndex() int32 { - if x != nil { - return x.ValueIndex - } - return 0 -} - -var File_google_api_expr_v1alpha1_explain_proto protoreflect.FileDescriptor - -var file_google_api_expr_v1alpha1_explain_proto_rawDesc = []byte{ - 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, - 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, - 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xce, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x37, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x49, 0x0a, - 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, - 
0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, - 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x6f, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_api_expr_v1alpha1_explain_proto_rawDescOnce sync.Once - file_google_api_expr_v1alpha1_explain_proto_rawDescData = file_google_api_expr_v1alpha1_explain_proto_rawDesc -) - -func file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP() []byte { - file_google_api_expr_v1alpha1_explain_proto_rawDescOnce.Do(func() { - file_google_api_expr_v1alpha1_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_explain_proto_rawDescData) - }) - return file_google_api_expr_v1alpha1_explain_proto_rawDescData -} - -var file_google_api_expr_v1alpha1_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_api_expr_v1alpha1_explain_proto_goTypes = []interface{}{ - (*Explain)(nil), // 0: google.api.expr.v1alpha1.Explain - (*Explain_ExprStep)(nil), // 1: google.api.expr.v1alpha1.Explain.ExprStep - (*Value)(nil), // 2: google.api.expr.v1alpha1.Value -} -var file_google_api_expr_v1alpha1_explain_proto_depIdxs = []int32{ - 2, // 0: google.api.expr.v1alpha1.Explain.values:type_name -> google.api.expr.v1alpha1.Value - 1, // 1: google.api.expr.v1alpha1.Explain.expr_steps:type_name -> google.api.expr.v1alpha1.Explain.ExprStep - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_google_api_expr_v1alpha1_explain_proto_init() } -func file_google_api_expr_v1alpha1_explain_proto_init() { - if File_google_api_expr_v1alpha1_explain_proto != nil { - return - } - file_google_api_expr_v1alpha1_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_api_expr_v1alpha1_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Explain); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Explain_ExprStep); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: 
protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_expr_v1alpha1_explain_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_expr_v1alpha1_explain_proto_goTypes, - DependencyIndexes: file_google_api_expr_v1alpha1_explain_proto_depIdxs, - MessageInfos: file_google_api_expr_v1alpha1_explain_proto_msgTypes, - }.Build() - File_google_api_expr_v1alpha1_explain_proto = out.File - file_google_api_expr_v1alpha1_explain_proto_rawDesc = nil - file_google_api_expr_v1alpha1_explain_proto_goTypes = nil - file_google_api_expr_v1alpha1_explain_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go deleted file mode 100644 index 7e5479874b..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ /dev/null @@ -1,1689 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/expr/v1alpha1/syntax.proto - -package expr - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - structpb "google.golang.org/protobuf/types/known/structpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// An expression together with source information as returned by the parser. -type ParsedExpr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The parsed expression. - Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` - // The source info derived from input that generated the parsed `expr`. 
- SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` -} - -func (x *ParsedExpr) Reset() { - *x = ParsedExpr{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParsedExpr) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParsedExpr) ProtoMessage() {} - -func (x *ParsedExpr) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. -func (*ParsedExpr) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{0} -} - -func (x *ParsedExpr) GetExpr() *Expr { - if x != nil { - return x.Expr - } - return nil -} - -func (x *ParsedExpr) GetSourceInfo() *SourceInfo { - if x != nil { - return x.SourceInfo - } - return nil -} - -// An abstract representation of a common expression. -// -// Expressions are abstractly represented as a collection of identifiers, -// select statements, function calls, literals, and comprehensions. All -// operators with the exception of the '.' operator are modelled as function -// calls. This makes it easy to represent new operators into the existing AST. -// -// All references within expressions must resolve to a [Decl][google.api.expr.v1alpha1.Decl] provided at -// type-check for an expression to be valid. A reference may either be a bare -// identifier `name` or a qualified identifier `google.api.name`. References -// may either refer to a value or a function declaration. -// -// For example, the expression `google.api.name.startsWith('expr')` references -// the declaration `google.api.name` within a [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and -// the function declaration `startsWith`. -type Expr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. An id assigned to this node by the parser which is unique in a - // given expression tree. This is used to associate type information and other - // attributes to a node in the parse tree. - Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // Required. Variants of expressions. 
- // - // Types that are assignable to ExprKind: - // *Expr_ConstExpr - // *Expr_IdentExpr - // *Expr_SelectExpr - // *Expr_CallExpr - // *Expr_ListExpr - // *Expr_StructExpr - // *Expr_ComprehensionExpr - ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` -} - -func (x *Expr) Reset() { - *x = Expr{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr) ProtoMessage() {} - -func (x *Expr) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr.ProtoReflect.Descriptor instead. -func (*Expr) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1} -} - -func (x *Expr) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (m *Expr) GetExprKind() isExpr_ExprKind { - if m != nil { - return m.ExprKind - } - return nil -} - -func (x *Expr) GetConstExpr() *Constant { - if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { - return x.ConstExpr - } - return nil -} - -func (x *Expr) GetIdentExpr() *Expr_Ident { - if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { - return x.IdentExpr - } - return nil -} - -func (x *Expr) GetSelectExpr() *Expr_Select { - if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { - return x.SelectExpr - } - return nil -} - -func (x *Expr) GetCallExpr() *Expr_Call { - if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { - return x.CallExpr - } - return nil -} - -func (x *Expr) GetListExpr() *Expr_CreateList { - if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { - return x.ListExpr - } - return nil -} - -func (x *Expr) GetStructExpr() *Expr_CreateStruct { - if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { - return x.StructExpr - } - return nil -} - -func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { - if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { - return x.ComprehensionExpr - } - return nil -} - -type isExpr_ExprKind interface { - isExpr_ExprKind() -} - -type Expr_ConstExpr struct { - // A literal expression. - ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` -} - -type Expr_IdentExpr struct { - // An identifier expression. - IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` -} - -type Expr_SelectExpr struct { - // A field selection expression, e.g. `request.auth`. - SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` -} - -type Expr_CallExpr struct { - // A call expression, including calls to predefined functions and operators. - CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` -} - -type Expr_ListExpr struct { - // A list creation expression. - ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` -} - -type Expr_StructExpr struct { - // A map or message creation expression. - StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` -} - -type Expr_ComprehensionExpr struct { - // A comprehension expression. 
- ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` -} - -func (*Expr_ConstExpr) isExpr_ExprKind() {} - -func (*Expr_IdentExpr) isExpr_ExprKind() {} - -func (*Expr_SelectExpr) isExpr_ExprKind() {} - -func (*Expr_CallExpr) isExpr_ExprKind() {} - -func (*Expr_ListExpr) isExpr_ExprKind() {} - -func (*Expr_StructExpr) isExpr_ExprKind() {} - -func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} - -// Represents a primitive literal. -// -// Named 'Constant' here for backwards compatibility. -// -// This is similar as the primitives supported in the well-known type -// `google.protobuf.Value`, but richer so it can represent CEL's full range of -// primitives. -// -// Lists and structs are not included as constants as these aggregate types may -// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require evaluation and are thus not constant. -// -// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`, -// `true`, `null`. -type Constant struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The valid constant kinds. - // - // Types that are assignable to ConstantKind: - // *Constant_NullValue - // *Constant_BoolValue - // *Constant_Int64Value - // *Constant_Uint64Value - // *Constant_DoubleValue - // *Constant_StringValue - // *Constant_BytesValue - // *Constant_DurationValue - // *Constant_TimestampValue - ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` -} - -func (x *Constant) Reset() { - *x = Constant{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Constant) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Constant) ProtoMessage() {} - -func (x *Constant) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Constant.ProtoReflect.Descriptor instead. 
-func (*Constant) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{2} -} - -func (m *Constant) GetConstantKind() isConstant_ConstantKind { - if m != nil { - return m.ConstantKind - } - return nil -} - -func (x *Constant) GetNullValue() structpb.NullValue { - if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { - return x.NullValue - } - return structpb.NullValue_NULL_VALUE -} - -func (x *Constant) GetBoolValue() bool { - if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (x *Constant) GetInt64Value() int64 { - if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (x *Constant) GetUint64Value() uint64 { - if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { - return x.Uint64Value - } - return 0 -} - -func (x *Constant) GetDoubleValue() float64 { - if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (x *Constant) GetStringValue() string { - if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { - return x.StringValue - } - return "" -} - -func (x *Constant) GetBytesValue() []byte { - if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { - return x.BytesValue - } - return nil -} - -// Deprecated: Do not use. -func (x *Constant) GetDurationValue() *durationpb.Duration { - if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { - return x.DurationValue - } - return nil -} - -// Deprecated: Do not use. -func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { - if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { - return x.TimestampValue - } - return nil -} - -type isConstant_ConstantKind interface { - isConstant_ConstantKind() -} - -type Constant_NullValue struct { - // null value. - NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Constant_BoolValue struct { - // boolean value. - BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Constant_Int64Value struct { - // int64 value. - Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type Constant_Uint64Value struct { - // uint64 value. - Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` -} - -type Constant_DoubleValue struct { - // double value. - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type Constant_StringValue struct { - // string value. - StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Constant_BytesValue struct { - // bytes value. - BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` -} - -type Constant_DurationValue struct { - // protobuf.Duration value. - // - // Deprecated: duration is no longer considered a builtin cel type. - // - // Deprecated: Do not use. - DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` -} - -type Constant_TimestampValue struct { - // protobuf.Timestamp value. - // - // Deprecated: timestamp is no longer considered a builtin cel type. - // - // Deprecated: Do not use. 
- TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` -} - -func (*Constant_NullValue) isConstant_ConstantKind() {} - -func (*Constant_BoolValue) isConstant_ConstantKind() {} - -func (*Constant_Int64Value) isConstant_ConstantKind() {} - -func (*Constant_Uint64Value) isConstant_ConstantKind() {} - -func (*Constant_DoubleValue) isConstant_ConstantKind() {} - -func (*Constant_StringValue) isConstant_ConstantKind() {} - -func (*Constant_BytesValue) isConstant_ConstantKind() {} - -func (*Constant_DurationValue) isConstant_ConstantKind() {} - -func (*Constant_TimestampValue) isConstant_ConstantKind() {} - -// Source information collected at parse time. -type SourceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The syntax version of the source, e.g. `cel1`. - SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` - // The location name. All position information attached to an expression is - // relative to this location. - // - // The location could be a file, UI element, or similar. For example, - // `acme/app/AnvilPolicy.cel`. - Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` - // Monotonically increasing list of character offsets where newlines appear. - // - // The line number of a given position is the index `i` where for a given - // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The - // column may be derivd from `id_positions[id] - line_offsets[i]`. - LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` - // A map from the parse node id (e.g. `Expr.id`) to the character offset - // within source. - Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - // A map from the parse node id where a macro replacement was made to the - // call `Expr` that resulted in a macro expansion. - // - // For example, `has(value.field)` is a function call that is replaced by a - // `test_only` field selection in the AST. Likewise, the call - // `list.exists(e, e > 10)` translates to a comprehension expression. The key - // in the map corresponds to the expression id of the expanded macro, and the - // value is the call `Expr` that was replaced. - MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *SourceInfo) Reset() { - *x = SourceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SourceInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceInfo) ProtoMessage() {} - -func (x *SourceInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. 
-func (*SourceInfo) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3} -} - -func (x *SourceInfo) GetSyntaxVersion() string { - if x != nil { - return x.SyntaxVersion - } - return "" -} - -func (x *SourceInfo) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *SourceInfo) GetLineOffsets() []int32 { - if x != nil { - return x.LineOffsets - } - return nil -} - -func (x *SourceInfo) GetPositions() map[int64]int32 { - if x != nil { - return x.Positions - } - return nil -} - -func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { - if x != nil { - return x.MacroCalls - } - return nil -} - -// A specific position in source. -type SourcePosition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The soucre location name (e.g. file name). - Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` - // The character offset. - Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` - // The 1-based index of the starting line in the source text - // where the issue occurs, or 0 if unknown. - Line int32 `protobuf:"varint,3,opt,name=line,proto3" json:"line,omitempty"` - // The 0-based index of the starting position within the line of source text - // where the issue occurs. Only meaningful if line is nonzero. - Column int32 `protobuf:"varint,4,opt,name=column,proto3" json:"column,omitempty"` -} - -func (x *SourcePosition) Reset() { - *x = SourcePosition{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SourcePosition) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourcePosition) ProtoMessage() {} - -func (x *SourcePosition) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourcePosition.ProtoReflect.Descriptor instead. -func (*SourcePosition) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{4} -} - -func (x *SourcePosition) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *SourcePosition) GetOffset() int32 { - if x != nil { - return x.Offset - } - return 0 -} - -func (x *SourcePosition) GetLine() int32 { - if x != nil { - return x.Line - } - return 0 -} - -func (x *SourcePosition) GetColumn() int32 { - if x != nil { - return x.Column - } - return 0 -} - -// An identifier expression. e.g. `request`. -type Expr_Ident struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Holds a single, unqualified identifier, possibly preceded by a - // '.'. - // - // Qualified names are represented by the [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *Expr_Ident) Reset() { - *x = Expr_Ident{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_Ident) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_Ident) ProtoMessage() {} - -func (x *Expr_Ident) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. -func (*Expr_Ident) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 0} -} - -func (x *Expr_Ident) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// A field selection expression. e.g. `request.auth`. -type Expr_Select struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The target of the selection expression. - // - // For example, in the select expression `request.auth`, the `request` - // portion of the expression is the `operand`. - Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` - // Required. The name of the field to select. - // - // For example, in the select expression `request.auth`, the `auth` portion - // of the expression would be the `field`. - Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` - // Whether the select is to be interpreted as a field presence test. - // - // This results from the macro `has(request.auth)`. - TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` -} - -func (x *Expr_Select) Reset() { - *x = Expr_Select{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_Select) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_Select) ProtoMessage() {} - -func (x *Expr_Select) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. -func (*Expr_Select) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 1} -} - -func (x *Expr_Select) GetOperand() *Expr { - if x != nil { - return x.Operand - } - return nil -} - -func (x *Expr_Select) GetField() string { - if x != nil { - return x.Field - } - return "" -} - -func (x *Expr_Select) GetTestOnly() bool { - if x != nil { - return x.TestOnly - } - return false -} - -// A call expression, including calls to predefined functions and operators. -// -// For example, `value == 10`, `size(map_value)`. -type Expr_Call struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The target of an method call-style expression. 
For example, `x` in - // `x.f()`. - Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - // Required. The name of the function or method being called. - Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` - // The arguments. - Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` -} - -func (x *Expr_Call) Reset() { - *x = Expr_Call{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_Call) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_Call) ProtoMessage() {} - -func (x *Expr_Call) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. -func (*Expr_Call) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 2} -} - -func (x *Expr_Call) GetTarget() *Expr { - if x != nil { - return x.Target - } - return nil -} - -func (x *Expr_Call) GetFunction() string { - if x != nil { - return x.Function - } - return "" -} - -func (x *Expr_Call) GetArgs() []*Expr { - if x != nil { - return x.Args - } - return nil -} - -// A list creation expression. -// -// Lists may either be homogenous, e.g. `[1, 2, 3]`, or heterogenous, e.g. -// `dyn([1, 'hello', 2.0])` -type Expr_CreateList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The elements part of the list. - Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` -} - -func (x *Expr_CreateList) Reset() { - *x = Expr_CreateList{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_CreateList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_CreateList) ProtoMessage() {} - -func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. -func (*Expr_CreateList) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 3} -} - -func (x *Expr_CreateList) GetElements() []*Expr { - if x != nil { - return x.Elements - } - return nil -} - -// A map or message creation expression. -// -// Maps are constructed as `{'key_name': 'value'}`. Message construction is -// similar, but prefixed with a type name and composed of field ids: -// `types.MyType{field_id: 'value'}`. -type Expr_CreateStruct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type name of the message to be created, empty when creating map - // literals. 
- MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` - // The entries in the creation expression. - Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` -} - -func (x *Expr_CreateStruct) Reset() { - *x = Expr_CreateStruct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_CreateStruct) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_CreateStruct) ProtoMessage() {} - -func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. -func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4} -} - -func (x *Expr_CreateStruct) GetMessageName() string { - if x != nil { - return x.MessageName - } - return "" -} - -func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { - if x != nil { - return x.Entries - } - return nil -} - -// A comprehension expression applied to a list or map. -// -// Comprehensions are not part of the core syntax, but enabled with macros. -// A macro matches a specific call signature within a parsed AST and replaces -// the call with an alternate AST block. Macro expansion happens at parse -// time. -// -// The following macros are supported within CEL: -// -// Aggregate type macros may be applied to all elements in a list or all keys -// in a map: -// -// * `all`, `exists`, `exists_one` - test a predicate expression against -// the inputs and return `true` if the predicate is satisfied for all, -// any, or only one value `list.all(x, x < 10)`. -// * `filter` - test a predicate expression against the inputs and return -// the subset of elements which satisfy the predicate: -// `payments.filter(p, p > 1000)`. -// * `map` - apply an expression to all elements in the input and return the -// output aggregate type: `[1, 2, 3].map(i, i * i)`. -// -// The `has(m.x)` macro tests whether the property `x` is present in struct -// `m`. The semantics of this macro depend on the type of `m`. For proto2 -// messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the -// macro tests whether the property is set to its default. For map and struct -// types, the macro tests whether the property `x` is defined on `m`. -type Expr_Comprehension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the iteration variable. - IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - // The range over which var iterates. - IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` - // The name of the variable used for accumulation of the result. - AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` - // The initial value of the accumulator. 
- AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - // An expression which can contain iter_var and accu_var. - // - // Returns false when the result has been computed and may be used as - // a hint to short-circuit the remainder of the comprehension. - LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - // An expression which can contain iter_var and accu_var. - // - // Computes the next value of accu_var. - LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` - // An expression which can contain accu_var. - // - // Computes the result. - Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` -} - -func (x *Expr_Comprehension) Reset() { - *x = Expr_Comprehension{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_Comprehension) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_Comprehension) ProtoMessage() {} - -func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. -func (*Expr_Comprehension) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 5} -} - -func (x *Expr_Comprehension) GetIterVar() string { - if x != nil { - return x.IterVar - } - return "" -} - -func (x *Expr_Comprehension) GetIterRange() *Expr { - if x != nil { - return x.IterRange - } - return nil -} - -func (x *Expr_Comprehension) GetAccuVar() string { - if x != nil { - return x.AccuVar - } - return "" -} - -func (x *Expr_Comprehension) GetAccuInit() *Expr { - if x != nil { - return x.AccuInit - } - return nil -} - -func (x *Expr_Comprehension) GetLoopCondition() *Expr { - if x != nil { - return x.LoopCondition - } - return nil -} - -func (x *Expr_Comprehension) GetLoopStep() *Expr { - if x != nil { - return x.LoopStep - } - return nil -} - -func (x *Expr_Comprehension) GetResult() *Expr { - if x != nil { - return x.Result - } - return nil -} - -// Represents an entry. -type Expr_CreateStruct_Entry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. An id assigned to this node by the parser which is unique - // in a given expression tree. This is used to associate type - // information and other attributes to the node. - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // The `Entry` key kinds. - // - // Types that are assignable to KeyKind: - // *Expr_CreateStruct_Entry_FieldKey - // *Expr_CreateStruct_Entry_MapKey - KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` - // Required. The value assigned to the key. 
- Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Expr_CreateStruct_Entry) Reset() { - *x = Expr_CreateStruct_Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr_CreateStruct_Entry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr_CreateStruct_Entry) ProtoMessage() {} - -func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. -func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4, 0} -} - -func (x *Expr_CreateStruct_Entry) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { - if m != nil { - return m.KeyKind - } - return nil -} - -func (x *Expr_CreateStruct_Entry) GetFieldKey() string { - if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { - return x.FieldKey - } - return "" -} - -func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { - if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { - return x.MapKey - } - return nil -} - -func (x *Expr_CreateStruct_Entry) GetValue() *Expr { - if x != nil { - return x.Value - } - return nil -} - -type isExpr_CreateStruct_Entry_KeyKind interface { - isExpr_CreateStruct_Entry_KeyKind() -} - -type Expr_CreateStruct_Entry_FieldKey struct { - // The field key for a message creator statement. - FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` -} - -type Expr_CreateStruct_Entry_MapKey struct { - // The key expression for a map creation statement. 
- MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` -} - -func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} - -func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} - -var File_google_api_expr_v1alpha1_syntax_proto protoreflect.FileDescriptor - -var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, - 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, - 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, - 0x32, 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, - 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, - 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xdc, 0x0c, 0x0a, 0x04, 0x45, - 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, - 0x48, 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x45, 0x78, 0x70, 0x72, 
0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x42, 0x0a, 0x09, 0x63, 0x61, 0x6c, - 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x48, 0x0a, - 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, - 0x69, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x5d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, - 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x38, 0x0a, - 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x43, - 0x61, 0x6c, 0x6c, 0x12, 0x36, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 
0x32, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x48, 0x0a, 0x0a, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0xb4, 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x07, 0x65, 0x6e, 0x74, - 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0xb3, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, - 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xfd, 0x02, 0x0a, - 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, - 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, - 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, - 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, - 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, - 0x56, 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 
0x69, 0x6e, 0x69, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, - 0x12, 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, - 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, - 0x53, 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, - 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, - 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, - 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, - 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xb9, 0x03, - 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, - 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f, 0x4d, 0x61, - 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x50, 
0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6c, - 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53, 0x79, 0x6e, - 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce sync.Once - file_google_api_expr_v1alpha1_syntax_proto_rawDescData = file_google_api_expr_v1alpha1_syntax_proto_rawDesc -) - -func file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP() []byte { - file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce.Do(func() { - file_google_api_expr_v1alpha1_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_syntax_proto_rawDescData) - }) - return file_google_api_expr_v1alpha1_syntax_proto_rawDescData -} - -var file_google_api_expr_v1alpha1_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 14) -var file_google_api_expr_v1alpha1_syntax_proto_goTypes = []interface{}{ - (*ParsedExpr)(nil), // 0: google.api.expr.v1alpha1.ParsedExpr - (*Expr)(nil), // 1: google.api.expr.v1alpha1.Expr - (*Constant)(nil), // 2: google.api.expr.v1alpha1.Constant - (*SourceInfo)(nil), // 3: google.api.expr.v1alpha1.SourceInfo - (*SourcePosition)(nil), // 4: google.api.expr.v1alpha1.SourcePosition - (*Expr_Ident)(nil), // 5: google.api.expr.v1alpha1.Expr.Ident - (*Expr_Select)(nil), // 6: google.api.expr.v1alpha1.Expr.Select - (*Expr_Call)(nil), // 7: google.api.expr.v1alpha1.Expr.Call - (*Expr_CreateList)(nil), // 8: google.api.expr.v1alpha1.Expr.CreateList - (*Expr_CreateStruct)(nil), // 9: google.api.expr.v1alpha1.Expr.CreateStruct - (*Expr_Comprehension)(nil), // 10: google.api.expr.v1alpha1.Expr.Comprehension - (*Expr_CreateStruct_Entry)(nil), // 11: google.api.expr.v1alpha1.Expr.CreateStruct.Entry - nil, // 12: google.api.expr.v1alpha1.SourceInfo.PositionsEntry - nil, // 13: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry - (structpb.NullValue)(0), // 14: google.protobuf.NullValue - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp -} -var file_google_api_expr_v1alpha1_syntax_proto_depIdxs = []int32{ - 1, // 0: google.api.expr.v1alpha1.ParsedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr - 3, // 1: google.api.expr.v1alpha1.ParsedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo - 2, // 2: 
google.api.expr.v1alpha1.Expr.const_expr:type_name -> google.api.expr.v1alpha1.Constant - 5, // 3: google.api.expr.v1alpha1.Expr.ident_expr:type_name -> google.api.expr.v1alpha1.Expr.Ident - 6, // 4: google.api.expr.v1alpha1.Expr.select_expr:type_name -> google.api.expr.v1alpha1.Expr.Select - 7, // 5: google.api.expr.v1alpha1.Expr.call_expr:type_name -> google.api.expr.v1alpha1.Expr.Call - 8, // 6: google.api.expr.v1alpha1.Expr.list_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateList - 9, // 7: google.api.expr.v1alpha1.Expr.struct_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct - 10, // 8: google.api.expr.v1alpha1.Expr.comprehension_expr:type_name -> google.api.expr.v1alpha1.Expr.Comprehension - 14, // 9: google.api.expr.v1alpha1.Constant.null_value:type_name -> google.protobuf.NullValue - 15, // 10: google.api.expr.v1alpha1.Constant.duration_value:type_name -> google.protobuf.Duration - 16, // 11: google.api.expr.v1alpha1.Constant.timestamp_value:type_name -> google.protobuf.Timestamp - 12, // 12: google.api.expr.v1alpha1.SourceInfo.positions:type_name -> google.api.expr.v1alpha1.SourceInfo.PositionsEntry - 13, // 13: google.api.expr.v1alpha1.SourceInfo.macro_calls:type_name -> google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry - 1, // 14: google.api.expr.v1alpha1.Expr.Select.operand:type_name -> google.api.expr.v1alpha1.Expr - 1, // 15: google.api.expr.v1alpha1.Expr.Call.target:type_name -> google.api.expr.v1alpha1.Expr - 1, // 16: google.api.expr.v1alpha1.Expr.Call.args:type_name -> google.api.expr.v1alpha1.Expr - 1, // 17: google.api.expr.v1alpha1.Expr.CreateList.elements:type_name -> google.api.expr.v1alpha1.Expr - 11, // 18: google.api.expr.v1alpha1.Expr.CreateStruct.entries:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct.Entry - 1, // 19: google.api.expr.v1alpha1.Expr.Comprehension.iter_range:type_name -> google.api.expr.v1alpha1.Expr - 1, // 20: google.api.expr.v1alpha1.Expr.Comprehension.accu_init:type_name -> google.api.expr.v1alpha1.Expr - 1, // 21: google.api.expr.v1alpha1.Expr.Comprehension.loop_condition:type_name -> google.api.expr.v1alpha1.Expr - 1, // 22: google.api.expr.v1alpha1.Expr.Comprehension.loop_step:type_name -> google.api.expr.v1alpha1.Expr - 1, // 23: google.api.expr.v1alpha1.Expr.Comprehension.result:type_name -> google.api.expr.v1alpha1.Expr - 1, // 24: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.map_key:type_name -> google.api.expr.v1alpha1.Expr - 1, // 25: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.value:type_name -> google.api.expr.v1alpha1.Expr - 1, // 26: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry.value:type_name -> google.api.expr.v1alpha1.Expr - 27, // [27:27] is the sub-list for method output_type - 27, // [27:27] is the sub-list for method input_type - 27, // [27:27] is the sub-list for extension type_name - 27, // [27:27] is the sub-list for extension extendee - 0, // [0:27] is the sub-list for field type_name -} - -func init() { file_google_api_expr_v1alpha1_syntax_proto_init() } -func file_google_api_expr_v1alpha1_syntax_proto_init() { - if File_google_api_expr_v1alpha1_syntax_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParsedExpr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*Expr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Constant); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourcePosition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Ident); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Select); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Call); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateStruct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Comprehension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateStruct_Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Expr_ConstExpr)(nil), - (*Expr_IdentExpr)(nil), - (*Expr_SelectExpr)(nil), - (*Expr_CallExpr)(nil), - (*Expr_ListExpr)(nil), - (*Expr_StructExpr)(nil), - (*Expr_ComprehensionExpr)(nil), - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*Constant_NullValue)(nil), - (*Constant_BoolValue)(nil), - (*Constant_Int64Value)(nil), - (*Constant_Uint64Value)(nil), - (*Constant_DoubleValue)(nil), - (*Constant_StringValue)(nil), - (*Constant_BytesValue)(nil), - (*Constant_DurationValue)(nil), 
- (*Constant_TimestampValue)(nil), - } - file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].OneofWrappers = []interface{}{ - (*Expr_CreateStruct_Entry_FieldKey)(nil), - (*Expr_CreateStruct_Entry_MapKey)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_expr_v1alpha1_syntax_proto_rawDesc, - NumEnums: 0, - NumMessages: 14, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_expr_v1alpha1_syntax_proto_goTypes, - DependencyIndexes: file_google_api_expr_v1alpha1_syntax_proto_depIdxs, - MessageInfos: file_google_api_expr_v1alpha1_syntax_proto_msgTypes, - }.Build() - File_google_api_expr_v1alpha1_syntax_proto = out.File - file_google_api_expr_v1alpha1_syntax_proto_rawDesc = nil - file_google_api_expr_v1alpha1_syntax_proto_goTypes = nil - file_google_api_expr_v1alpha1_syntax_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go deleted file mode 100644 index a0a7c849f5..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go +++ /dev/null @@ -1,725 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/expr/v1alpha1/value.proto - -package expr - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Represents a CEL value. -// -// This is similar to `google.protobuf.Value`, but can represent CEL's full -// range of values. -type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The valid kinds of values. 
- // - // Types that are assignable to Kind: - // *Value_NullValue - // *Value_BoolValue - // *Value_Int64Value - // *Value_Uint64Value - // *Value_DoubleValue - // *Value_StringValue - // *Value_BytesValue - // *Value_EnumValue - // *Value_ObjectValue - // *Value_MapValue - // *Value_ListValue - // *Value_TypeValue - Kind isValue_Kind `protobuf_oneof:"kind"` -} - -func (x *Value) Reset() { - *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Value) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Value) ProtoMessage() {} - -func (x *Value) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Value.ProtoReflect.Descriptor instead. -func (*Value) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{0} -} - -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (x *Value) GetNullValue() structpb.NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue - } - return structpb.NullValue_NULL_VALUE -} - -func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (x *Value) GetInt64Value() int64 { - if x, ok := x.GetKind().(*Value_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (x *Value) GetUint64Value() uint64 { - if x, ok := x.GetKind().(*Value_Uint64Value); ok { - return x.Uint64Value - } - return 0 -} - -func (x *Value) GetDoubleValue() float64 { - if x, ok := x.GetKind().(*Value_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue - } - return "" -} - -func (x *Value) GetBytesValue() []byte { - if x, ok := x.GetKind().(*Value_BytesValue); ok { - return x.BytesValue - } - return nil -} - -func (x *Value) GetEnumValue() *EnumValue { - if x, ok := x.GetKind().(*Value_EnumValue); ok { - return x.EnumValue - } - return nil -} - -func (x *Value) GetObjectValue() *anypb.Any { - if x, ok := x.GetKind().(*Value_ObjectValue); ok { - return x.ObjectValue - } - return nil -} - -func (x *Value) GetMapValue() *MapValue { - if x, ok := x.GetKind().(*Value_MapValue); ok { - return x.MapValue - } - return nil -} - -func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue - } - return nil -} - -func (x *Value) GetTypeValue() string { - if x, ok := x.GetKind().(*Value_TypeValue); ok { - return x.TypeValue - } - return "" -} - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_NullValue struct { - // Null value. - NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Value_BoolValue struct { - // Boolean value. - BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Value_Int64Value struct { - // Signed integer value. 
- Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type Value_Uint64Value struct { - // Unsigned integer value. - Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` -} - -type Value_DoubleValue struct { - // Floating point value. - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type Value_StringValue struct { - // UTF-8 string value. - StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Value_BytesValue struct { - // Byte string value. - BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` -} - -type Value_EnumValue struct { - // An enum value. - EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` -} - -type Value_ObjectValue struct { - // The proto message backing an object value. - ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` -} - -type Value_MapValue struct { - // Map value. - MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` -} - -type Value_ListValue struct { - // List value. - ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` -} - -type Value_TypeValue struct { - // Type value. - TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` -} - -func (*Value_NullValue) isValue_Kind() {} - -func (*Value_BoolValue) isValue_Kind() {} - -func (*Value_Int64Value) isValue_Kind() {} - -func (*Value_Uint64Value) isValue_Kind() {} - -func (*Value_DoubleValue) isValue_Kind() {} - -func (*Value_StringValue) isValue_Kind() {} - -func (*Value_BytesValue) isValue_Kind() {} - -func (*Value_EnumValue) isValue_Kind() {} - -func (*Value_ObjectValue) isValue_Kind() {} - -func (*Value_MapValue) isValue_Kind() {} - -func (*Value_ListValue) isValue_Kind() {} - -func (*Value_TypeValue) isValue_Kind() {} - -// An enum value. -type EnumValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fully qualified name of the enum type. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The value of the enum. - Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *EnumValue) Reset() { - *x = EnumValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnumValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumValue) ProtoMessage() {} - -func (x *EnumValue) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. -func (*EnumValue) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{1} -} - -func (x *EnumValue) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *EnumValue) GetValue() int32 { - if x != nil { - return x.Value - } - return 0 -} - -// A list. 
-// -// Wrapped in a message so 'not set' and empty can be differentiated, which is -// required for use in a 'oneof'. -type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ordered values in the list. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` -} - -func (x *ListValue) Reset() { - *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListValue) ProtoMessage() {} - -func (x *ListValue) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. -func (*ListValue) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{2} -} - -func (x *ListValue) GetValues() []*Value { - if x != nil { - return x.Values - } - return nil -} - -// A map. -// -// Wrapped in a message so 'not set' and empty can be differentiated, which is -// required for use in a 'oneof'. -type MapValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The set of map entries. - // - // CEL has fewer restrictions on keys, so a protobuf map represenation - // cannot be used. - Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` -} - -func (x *MapValue) Reset() { - *x = MapValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MapValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MapValue) ProtoMessage() {} - -func (x *MapValue) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. -func (*MapValue) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3} -} - -func (x *MapValue) GetEntries() []*MapValue_Entry { - if x != nil { - return x.Entries - } - return nil -} - -// An entry in the map. -type MapValue_Entry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The key. - // - // Must be unique with in the map. - // Currently only boolean, int, uint, and string values can be keys. - Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // The value. 
- Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *MapValue_Entry) Reset() { - *x = MapValue_Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MapValue_Entry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MapValue_Entry) ProtoMessage() {} - -func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { - mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. -func (*MapValue_Entry) Descriptor() ([]byte, []int) { - return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *MapValue_Entry) GetKey() *Value { - if x != nil { - return x.Key - } - return nil -} - -func (x *MapValue_Entry) GetValue() *Value { - if x != nil { - return x.Value - } - return nil -} - -var File_google_api_expr_v1alpha1_value_proto protoreflect.FileDescriptor - -var file_google_api_expr_v1alpha1_value_proto_rawDesc = []byte{ - 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, - 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x04, 0x0a, 0x05, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, - 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 
0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, - 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, - 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x44, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x37, 0x0a, - 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x71, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x31, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_api_expr_v1alpha1_value_proto_rawDescOnce sync.Once - file_google_api_expr_v1alpha1_value_proto_rawDescData = file_google_api_expr_v1alpha1_value_proto_rawDesc -) - -func file_google_api_expr_v1alpha1_value_proto_rawDescGZIP() []byte { - file_google_api_expr_v1alpha1_value_proto_rawDescOnce.Do(func() { - file_google_api_expr_v1alpha1_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_value_proto_rawDescData) - }) - return file_google_api_expr_v1alpha1_value_proto_rawDescData -} - -var file_google_api_expr_v1alpha1_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_api_expr_v1alpha1_value_proto_goTypes = []interface{}{ - (*Value)(nil), // 0: google.api.expr.v1alpha1.Value - (*EnumValue)(nil), // 1: google.api.expr.v1alpha1.EnumValue - (*ListValue)(nil), // 2: google.api.expr.v1alpha1.ListValue - (*MapValue)(nil), // 3: google.api.expr.v1alpha1.MapValue - (*MapValue_Entry)(nil), // 4: google.api.expr.v1alpha1.MapValue.Entry - (structpb.NullValue)(0), // 5: google.protobuf.NullValue - (*anypb.Any)(nil), // 6: google.protobuf.Any -} -var file_google_api_expr_v1alpha1_value_proto_depIdxs = []int32{ - 5, // 0: google.api.expr.v1alpha1.Value.null_value:type_name -> google.protobuf.NullValue - 1, // 1: google.api.expr.v1alpha1.Value.enum_value:type_name -> google.api.expr.v1alpha1.EnumValue - 6, // 2: google.api.expr.v1alpha1.Value.object_value:type_name -> google.protobuf.Any - 3, // 3: google.api.expr.v1alpha1.Value.map_value:type_name -> google.api.expr.v1alpha1.MapValue - 2, // 4: google.api.expr.v1alpha1.Value.list_value:type_name -> google.api.expr.v1alpha1.ListValue - 0, // 5: google.api.expr.v1alpha1.ListValue.values:type_name -> google.api.expr.v1alpha1.Value - 4, // 6: google.api.expr.v1alpha1.MapValue.entries:type_name -> google.api.expr.v1alpha1.MapValue.Entry - 0, // 7: google.api.expr.v1alpha1.MapValue.Entry.key:type_name -> google.api.expr.v1alpha1.Value - 0, // 8: google.api.expr.v1alpha1.MapValue.Entry.value:type_name -> google.api.expr.v1alpha1.Value - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is 
the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_google_api_expr_v1alpha1_value_proto_init() } -func file_google_api_expr_v1alpha1_value_proto_init() { - if File_google_api_expr_v1alpha1_value_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_expr_v1alpha1_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MapValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_expr_v1alpha1_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MapValue_Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_expr_v1alpha1_value_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Value_NullValue)(nil), - (*Value_BoolValue)(nil), - (*Value_Int64Value)(nil), - (*Value_Uint64Value)(nil), - (*Value_DoubleValue)(nil), - (*Value_StringValue)(nil), - (*Value_BytesValue)(nil), - (*Value_EnumValue)(nil), - (*Value_ObjectValue)(nil), - (*Value_MapValue)(nil), - (*Value_ListValue)(nil), - (*Value_TypeValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_expr_v1alpha1_value_proto_rawDesc, - NumEnums: 0, - NumMessages: 5, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_expr_v1alpha1_value_proto_goTypes, - DependencyIndexes: file_google_api_expr_v1alpha1_value_proto_depIdxs, - MessageInfos: file_google_api_expr_v1alpha1_value_proto_msgTypes, - }.Build() - File_google_api_expr_v1alpha1_value_proto = out.File - file_google_api_expr_v1alpha1_value_proto_rawDesc = nil - file_google_api_expr_v1alpha1_value_proto_goTypes = nil - file_google_api_expr_v1alpha1_value_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go deleted file mode 100644 index 619b46b312..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/label.proto - -package label - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Value types that can be used as label values. -type LabelDescriptor_ValueType int32 - -const ( - // A variable-length string. This is the default. - LabelDescriptor_STRING LabelDescriptor_ValueType = 0 - // Boolean; true or false. - LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 - // A 64-bit signed integer. - LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 -) - -// Enum value maps for LabelDescriptor_ValueType. -var ( - LabelDescriptor_ValueType_name = map[int32]string{ - 0: "STRING", - 1: "BOOL", - 2: "INT64", - } - LabelDescriptor_ValueType_value = map[string]int32{ - "STRING": 0, - "BOOL": 1, - "INT64": 2, - } -) - -func (x LabelDescriptor_ValueType) Enum() *LabelDescriptor_ValueType { - p := new(LabelDescriptor_ValueType) - *p = x - return p -} - -func (x LabelDescriptor_ValueType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (LabelDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_label_proto_enumTypes[0].Descriptor() -} - -func (LabelDescriptor_ValueType) Type() protoreflect.EnumType { - return &file_google_api_label_proto_enumTypes[0] -} - -func (x LabelDescriptor_ValueType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use LabelDescriptor_ValueType.Descriptor instead. -func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { - return file_google_api_label_proto_rawDescGZIP(), []int{0, 0} -} - -// A description of a label. -type LabelDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The label key. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // The type of data that can be assigned to the label. - ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` - // A human-readable description for the label. 
- Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *LabelDescriptor) Reset() { - *x = LabelDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_label_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelDescriptor) ProtoMessage() {} - -func (x *LabelDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_api_label_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelDescriptor.ProtoReflect.Descriptor instead. -func (*LabelDescriptor) Descriptor() ([]byte, []int) { - return file_google_api_label_proto_rawDescGZIP(), []int{0} -} - -func (x *LabelDescriptor) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { - if x != nil { - return x.ValueType - } - return LabelDescriptor_STRING -} - -func (x *LabelDescriptor) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -var File_google_api_label_proto protoreflect.FileDescriptor - -var file_google_api_label_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x22, 0xb9, 0x01, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, - 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, - 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x3b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, - 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_label_proto_rawDescOnce sync.Once - 
file_google_api_label_proto_rawDescData = file_google_api_label_proto_rawDesc -) - -func file_google_api_label_proto_rawDescGZIP() []byte { - file_google_api_label_proto_rawDescOnce.Do(func() { - file_google_api_label_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_label_proto_rawDescData) - }) - return file_google_api_label_proto_rawDescData -} - -var file_google_api_label_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_label_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_api_label_proto_goTypes = []interface{}{ - (LabelDescriptor_ValueType)(0), // 0: google.api.LabelDescriptor.ValueType - (*LabelDescriptor)(nil), // 1: google.api.LabelDescriptor -} -var file_google_api_label_proto_depIdxs = []int32{ - 0, // 0: google.api.LabelDescriptor.value_type:type_name -> google.api.LabelDescriptor.ValueType - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_api_label_proto_init() } -func file_google_api_label_proto_init() { - if File_google_api_label_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_label_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LabelDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_label_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_label_proto_goTypes, - DependencyIndexes: file_google_api_label_proto_depIdxs, - EnumInfos: file_google_api_label_proto_enumTypes, - MessageInfos: file_google_api_label_proto_msgTypes, - }.Build() - File_google_api_label_proto = out.File - file_google_api_label_proto_rawDesc = nil - file_google_api_label_proto_goTypes = nil - file_google_api_label_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go deleted file mode 100644 index 227a96fa93..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/launch_stage.proto - -package api - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The launch stage as defined by [Google Cloud Platform -// Launch Stages](http://cloud.google.com/terms/launch-stages). -type LaunchStage int32 - -const ( - // Do not use this default value. - LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 - // The feature is not yet implemented. Users can not use it. - LaunchStage_UNIMPLEMENTED LaunchStage = 6 - // Prelaunch features are hidden from users and are only visible internally. - LaunchStage_PRELAUNCH LaunchStage = 7 - // Early Access features are limited to a closed group of testers. To use - // these features, you must sign up in advance and sign a Trusted Tester - // agreement (which includes confidentiality provisions). These features may - // be unstable, changed in backward-incompatible ways, and are not - // guaranteed to be released. - LaunchStage_EARLY_ACCESS LaunchStage = 1 - // Alpha is a limited availability test for releases before they are cleared - // for widespread use. By Alpha, all significant design issues are resolved - // and we are in the process of verifying functionality. Alpha customers - // need to apply for access, agree to applicable terms, and have their - // projects allowlisted. Alpha releases don’t have to be feature complete, - // no SLAs are provided, and there are no technical support obligations, but - // they will be far enough along that customers can actually use them in - // test environments or for limited-use tests -- just like they would in - // normal production cases. - LaunchStage_ALPHA LaunchStage = 2 - // Beta is the point at which we are ready to open a release for any - // customer to use. There are no SLA or technical support obligations in a - // Beta release. Products will be complete from a feature perspective, but - // may have some open outstanding issues. Beta releases are suitable for - // limited production use cases. - LaunchStage_BETA LaunchStage = 3 - // GA features are open to all developers and are considered stable and - // fully qualified for production use. - LaunchStage_GA LaunchStage = 4 - // Deprecated features are scheduled to be shut down and removed. For more - // information, see the “Deprecation Policy” section of our [Terms of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the Deprecation - // Policy](https://cloud.google.com/terms/deprecation) documentation. - LaunchStage_DEPRECATED LaunchStage = 5 -) - -// Enum value maps for LaunchStage. 
-var ( - LaunchStage_name = map[int32]string{ - 0: "LAUNCH_STAGE_UNSPECIFIED", - 6: "UNIMPLEMENTED", - 7: "PRELAUNCH", - 1: "EARLY_ACCESS", - 2: "ALPHA", - 3: "BETA", - 4: "GA", - 5: "DEPRECATED", - } - LaunchStage_value = map[string]int32{ - "LAUNCH_STAGE_UNSPECIFIED": 0, - "UNIMPLEMENTED": 6, - "PRELAUNCH": 7, - "EARLY_ACCESS": 1, - "ALPHA": 2, - "BETA": 3, - "GA": 4, - "DEPRECATED": 5, - } -) - -func (x LaunchStage) Enum() *LaunchStage { - p := new(LaunchStage) - *p = x - return p -} - -func (x LaunchStage) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() -} - -func (LaunchStage) Type() protoreflect.EnumType { - return &file_google_api_launch_stage_proto_enumTypes[0] -} - -func (x LaunchStage) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use LaunchStage.Descriptor instead. -func (LaunchStage) EnumDescriptor() ([]byte, []int) { - return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} -} - -var File_google_api_launch_stage_proto protoreflect.FileDescriptor - -var file_google_api_launch_stage_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, - 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, - 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, - 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, - 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, - 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, - 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, - 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, - 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, - 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, - 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_launch_stage_proto_rawDescOnce sync.Once - file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc -) - -func file_google_api_launch_stage_proto_rawDescGZIP() []byte { - file_google_api_launch_stage_proto_rawDescOnce.Do(func() { - file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) - }) - return file_google_api_launch_stage_proto_rawDescData -} - -var 
file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_launch_stage_proto_goTypes = []interface{}{ - (LaunchStage)(0), // 0: google.api.LaunchStage -} -var file_google_api_launch_stage_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_launch_stage_proto_init() } -func file_google_api_launch_stage_proto_init() { - if File_google_api_launch_stage_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_launch_stage_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_launch_stage_proto_goTypes, - DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, - EnumInfos: file_google_api_launch_stage_proto_enumTypes, - }.Build() - File_google_api_launch_stage_proto = out.File - file_google_api_launch_stage_proto_rawDesc = nil - file_google_api_launch_stage_proto_goTypes = nil - file_google_api_launch_stage_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go deleted file mode 100644 index 987047d0e8..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ /dev/null @@ -1,773 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/metric.proto - -package metric - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - api "google.golang.org/genproto/googleapis/api" - label "google.golang.org/genproto/googleapis/api/label" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The kind of measurement. It describes how the data is reported. -// For information on setting the start time and end time based on -// the MetricKind, see [TimeInterval][google.monitoring.v3.TimeInterval]. -type MetricDescriptor_MetricKind int32 - -const ( - // Do not use this default value. 
- MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 - // An instantaneous measurement of a value. - MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 - // The change in a value during a time interval. - MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 - // A value accumulated over a time interval. Cumulative - // measurements in a time series should have the same start time - // and increasing end times, until an event resets the cumulative - // value to zero and sets a new start time for the following - // points. - MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 -) - -// Enum value maps for MetricDescriptor_MetricKind. -var ( - MetricDescriptor_MetricKind_name = map[int32]string{ - 0: "METRIC_KIND_UNSPECIFIED", - 1: "GAUGE", - 2: "DELTA", - 3: "CUMULATIVE", - } - MetricDescriptor_MetricKind_value = map[string]int32{ - "METRIC_KIND_UNSPECIFIED": 0, - "GAUGE": 1, - "DELTA": 2, - "CUMULATIVE": 3, - } -) - -func (x MetricDescriptor_MetricKind) Enum() *MetricDescriptor_MetricKind { - p := new(MetricDescriptor_MetricKind) - *p = x - return p -} - -func (x MetricDescriptor_MetricKind) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MetricDescriptor_MetricKind) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_metric_proto_enumTypes[0].Descriptor() -} - -func (MetricDescriptor_MetricKind) Type() protoreflect.EnumType { - return &file_google_api_metric_proto_enumTypes[0] -} - -func (x MetricDescriptor_MetricKind) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use MetricDescriptor_MetricKind.Descriptor instead. -func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { - return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} -} - -// The value type of a metric. -type MetricDescriptor_ValueType int32 - -const ( - // Do not use this default value. - MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 - // The value is a boolean. - // This value type can be used only if the metric kind is `GAUGE`. - MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 - // The value is a signed 64-bit integer. - MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 - // The value is a double precision floating point number. - MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 - // The value is a text string. - // This value type can be used only if the metric kind is `GAUGE`. - MetricDescriptor_STRING MetricDescriptor_ValueType = 4 - // The value is a [`Distribution`][google.api.Distribution]. - MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 - // The value is money. - MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 -) - -// Enum value maps for MetricDescriptor_ValueType. 
-var ( - MetricDescriptor_ValueType_name = map[int32]string{ - 0: "VALUE_TYPE_UNSPECIFIED", - 1: "BOOL", - 2: "INT64", - 3: "DOUBLE", - 4: "STRING", - 5: "DISTRIBUTION", - 6: "MONEY", - } - MetricDescriptor_ValueType_value = map[string]int32{ - "VALUE_TYPE_UNSPECIFIED": 0, - "BOOL": 1, - "INT64": 2, - "DOUBLE": 3, - "STRING": 4, - "DISTRIBUTION": 5, - "MONEY": 6, - } -) - -func (x MetricDescriptor_ValueType) Enum() *MetricDescriptor_ValueType { - p := new(MetricDescriptor_ValueType) - *p = x - return p -} - -func (x MetricDescriptor_ValueType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MetricDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_metric_proto_enumTypes[1].Descriptor() -} - -func (MetricDescriptor_ValueType) Type() protoreflect.EnumType { - return &file_google_api_metric_proto_enumTypes[1] -} - -func (x MetricDescriptor_ValueType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use MetricDescriptor_ValueType.Descriptor instead. -func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { - return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1} -} - -// Defines a metric type and its schema. Once a metric descriptor is created, -// deleting or altering it stops data collection and makes the metric type's -// existing data unusable. -// -type MetricDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The resource name of the metric descriptor. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The metric type, including its DNS name prefix. The type is not - // URL-encoded. All user-defined metric types have the DNS name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types should - // use a natural hierarchical grouping. For example: - // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" - Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` - // The set of labels that can be used to describe a specific - // instance of this metric type. For example, the - // `appengine.googleapis.com/http/server/response_latencies` metric - // type has a label for the HTTP response code, `response_code`, so - // you can look at latencies for successful responses or just - // for responses that failed. - Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` - // Whether the metric records instantaneous values, changes to a value, etc. - // Some combinations of `metric_kind` and `value_type` might not be supported. - MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` - // Whether the measurement is an integer, a floating-point number, etc. - // Some combinations of `metric_kind` and `value_type` might not be supported. - ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` - // The units in which the metric value is reported. It is only applicable - // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` - // defines the representation of the stored metric values. 
- // - // Different systems may scale the values to be more easily displayed (so a - // value of `0.02KBy` _might_ be displayed as `20By`, and a value of - // `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is - // `KBy`, then the value of the metric is always in thousands of bytes, no - // matter how it may be displayed.. - // - // If you want a custom metric to record the exact number of CPU-seconds used - // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is - // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 - // CPU-seconds, then the value is written as `12005`. - // - // Alternatively, if you want a custom metric to record data in a more - // granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is - // `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), - // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). - // - // The supported units are a subset of [The Unified Code for Units of - // Measure](http://unitsofmeasure.org/ucum.html) standard: - // - // **Basic units (UNIT)** - // - // * `bit` bit - // * `By` byte - // * `s` second - // * `min` minute - // * `h` hour - // * `d` day - // * `1` dimensionless - // - // **Prefixes (PREFIX)** - // - // * `k` kilo (10^3) - // * `M` mega (10^6) - // * `G` giga (10^9) - // * `T` tera (10^12) - // * `P` peta (10^15) - // * `E` exa (10^18) - // * `Z` zetta (10^21) - // * `Y` yotta (10^24) - // - // * `m` milli (10^-3) - // * `u` micro (10^-6) - // * `n` nano (10^-9) - // * `p` pico (10^-12) - // * `f` femto (10^-15) - // * `a` atto (10^-18) - // * `z` zepto (10^-21) - // * `y` yocto (10^-24) - // - // * `Ki` kibi (2^10) - // * `Mi` mebi (2^20) - // * `Gi` gibi (2^30) - // * `Ti` tebi (2^40) - // * `Pi` pebi (2^50) - // - // **Grammar** - // - // The grammar also includes these connectors: - // - // * `/` division or ratio (as an infix operator). For examples, - // `kBy/{email}` or `MiBy/10ms` (although you should almost never - // have `/s` in a metric `unit`; rates should always be computed at - // query time from the underlying cumulative or delta value). - // * `.` multiplication or composition (as an infix operator). For - // examples, `GBy.d` or `k{watt}.h`. - // - // The grammar for a unit is as follows: - // - // Expression = Component { "." Component } { "/" Component } ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // - // Notes: - // - // * `Annotation` is just a comment if it follows a `UNIT`. If the annotation - // is used alone, then the unit is equivalent to `1`. For examples, - // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing `{` or `}`. - // * `1` represents a unitary [dimensionless - // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such - // as in `1/s`. It is typically used when none of the basic units are - // appropriate. For example, "new users per day" can be represented as - // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new - // users). Alternatively, "thousands of page views per day" would be - // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric - // value of `5.3` would mean "5300 page views per day"). 
- // * `%` represents dimensionless value of 1/100, and annotates values giving - // a percentage (so the metric values are typically in the range of 0..100, - // and a metric value `3` means "3 percent"). - // * `10^2.%` indicates a metric contains a ratio, typically in the range - // 0..1, that will be multiplied by 100 and displayed as a percentage - // (so a metric value `0.03` means "3 percent"). - Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` - // A detailed description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - // A concise name for the metric, which can be displayed in user interfaces. - // Use sentence case without an ending period, for example "Request count". - // This field is optional but it is recommended to be set for any metrics - // associated with user-visible concepts, such as Quota. - DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // Optional. Metadata which can be used to guide usage of the metric. - Metadata *MetricDescriptor_MetricDescriptorMetadata `protobuf:"bytes,10,opt,name=metadata,proto3" json:"metadata,omitempty"` - // Optional. The launch stage of the metric definition. - LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` - // Read-only. If present, then a [time - // series][google.monitoring.v3.TimeSeries], which is identified partially by - // a metric type and a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that is associated - // with this metric type can only be associated with one of the monitored - // resource types listed here. - MonitoredResourceTypes []string `protobuf:"bytes,13,rep,name=monitored_resource_types,json=monitoredResourceTypes,proto3" json:"monitored_resource_types,omitempty"` -} - -func (x *MetricDescriptor) Reset() { - *x = MetricDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_metric_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricDescriptor) ProtoMessage() {} - -func (x *MetricDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_api_metric_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricDescriptor.ProtoReflect.Descriptor instead. 
-func (*MetricDescriptor) Descriptor() ([]byte, []int) { - return file_google_api_metric_proto_rawDescGZIP(), []int{0} -} - -func (x *MetricDescriptor) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *MetricDescriptor) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *MetricDescriptor) GetLabels() []*label.LabelDescriptor { - if x != nil { - return x.Labels - } - return nil -} - -func (x *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { - if x != nil { - return x.MetricKind - } - return MetricDescriptor_METRIC_KIND_UNSPECIFIED -} - -func (x *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { - if x != nil { - return x.ValueType - } - return MetricDescriptor_VALUE_TYPE_UNSPECIFIED -} - -func (x *MetricDescriptor) GetUnit() string { - if x != nil { - return x.Unit - } - return "" -} - -func (x *MetricDescriptor) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *MetricDescriptor) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *MetricDescriptor) GetMetadata() *MetricDescriptor_MetricDescriptorMetadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *MetricDescriptor) GetLaunchStage() api.LaunchStage { - if x != nil { - return x.LaunchStage - } - return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED -} - -func (x *MetricDescriptor) GetMonitoredResourceTypes() []string { - if x != nil { - return x.MonitoredResourceTypes - } - return nil -} - -// A specific metric, identified by specifying values for all of the -// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. -type Metric struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. - // For example, `custom.googleapis.com/invoice/paid/amount`. - Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` - // The set of label values that uniquely identify this metric. All - // labels listed in the `MetricDescriptor` must be assigned values. - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Metric) Reset() { - *x = Metric{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_metric_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metric) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metric) ProtoMessage() {} - -func (x *Metric) ProtoReflect() protoreflect.Message { - mi := &file_google_api_metric_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metric.ProtoReflect.Descriptor instead. -func (*Metric) Descriptor() ([]byte, []int) { - return file_google_api_metric_proto_rawDescGZIP(), []int{1} -} - -func (x *Metric) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Metric) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -// Additional annotations that can be used to guide the usage of a metric. 
-type MetricDescriptor_MetricDescriptorMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. Must use the [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] instead. - // - // Deprecated: Do not use. - LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` - // The sampling period of metric data points. For metrics which are written - // periodically, consecutive data points are stored at this time interval, - // excluding data loss due to errors. Metrics with a higher granularity have - // a smaller sampling period. - SamplePeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` - // The delay of data points caused by ingestion. Data points older than this - // age are guaranteed to be ingested and available to be read, excluding - // data loss due to errors. - IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` -} - -func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() { - *x = MetricDescriptor_MetricDescriptorMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_metric_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricDescriptor_MetricDescriptorMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricDescriptor_MetricDescriptorMetadata) ProtoMessage() {} - -func (x *MetricDescriptor_MetricDescriptorMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_api_metric_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricDescriptor_MetricDescriptorMetadata.ProtoReflect.Descriptor instead. -func (*MetricDescriptor_MetricDescriptorMetadata) Descriptor() ([]byte, []int) { - return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} -} - -// Deprecated: Do not use. 
-func (x *MetricDescriptor_MetricDescriptorMetadata) GetLaunchStage() api.LaunchStage { - if x != nil { - return x.LaunchStage - } - return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED -} - -func (x *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *durationpb.Duration { - if x != nil { - return x.SamplePeriod - } - return nil -} - -func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb.Duration { - if x != nil { - return x.IngestDelay - } - return nil -} - -var File_google_api_metric_proto protoreflect.FileDescriptor - -var file_google_api_metric_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, - 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a, - 0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x48, - 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, - 0x6e, 0x69, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, - 
0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, - 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, - 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, - 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x3e, - 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x3c, - 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, - 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, - 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, - 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, - 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, - 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, - 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x10, 0x04, 0x12, 
0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, - 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, - 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, - 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_metric_proto_rawDescOnce sync.Once - file_google_api_metric_proto_rawDescData = file_google_api_metric_proto_rawDesc -) - -func file_google_api_metric_proto_rawDescGZIP() []byte { - file_google_api_metric_proto_rawDescOnce.Do(func() { - file_google_api_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_metric_proto_rawDescData) - }) - return file_google_api_metric_proto_rawDescData -} - -var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_api_metric_proto_goTypes = []interface{}{ - (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind - (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType - (*MetricDescriptor)(nil), // 2: google.api.MetricDescriptor - (*Metric)(nil), // 3: google.api.Metric - (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata - nil, // 5: google.api.Metric.LabelsEntry - (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor - (api.LaunchStage)(0), // 7: google.api.LaunchStage - (*durationpb.Duration)(nil), // 8: google.protobuf.Duration -} -var file_google_api_metric_proto_depIdxs = []int32{ - 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor - 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind - 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType - 4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata - 7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage - 5, // 5: google.api.Metric.labels:type_name -> 
google.api.Metric.LabelsEntry - 7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage - 8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration - 8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_google_api_metric_proto_init() } -func file_google_api_metric_proto_init() { - if File_google_api_metric_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metric); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricDescriptor_MetricDescriptorMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_metric_proto_rawDesc, - NumEnums: 2, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_metric_proto_goTypes, - DependencyIndexes: file_google_api_metric_proto_depIdxs, - EnumInfos: file_google_api_metric_proto_enumTypes, - MessageInfos: file_google_api_metric_proto_msgTypes, - }.Build() - File_google_api_metric_proto = out.File - file_google_api_metric_proto_rawDesc = nil - file_google_api_metric_proto_goTypes = nil - file_google_api_metric_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go deleted file mode 100644 index 585046c56a..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/api/monitored_resource.proto - -package monitoredres - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - api "google.golang.org/genproto/googleapis/api" - label "google.golang.org/genproto/googleapis/api/label" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - structpb "google.golang.org/protobuf/types/known/structpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a -// type name and a set of labels. For example, the monitored resource -// descriptor for Google Compute Engine VM instances has a type of -// `"gce_instance"` and specifies the use of the labels `"instance_id"` and -// `"zone"` to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs generally -// provide a `list` method that returns the monitored resource descriptors used -// by the API. -// -type MonitoredResourceDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The resource name of the monitored resource descriptor: - // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where - // {type} is the value of the `type` field in this object and - // {project_id} is a project ID that provides API-specific context for - // accessing the type. APIs that do not use project information can use the - // resource name format `"monitoredResourceDescriptors/{type}"`. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // Required. The monitored resource type. For example, the type - // `"cloudsql_database"` represents databases in Google Cloud SQL. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Optional. A concise name for the monitored resource type that might be - // displayed in user interfaces. It should be a Title Cased Noun Phrase, - // without any article or other determiners. For example, - // `"Google Cloud SQL Database"`. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // Optional. A detailed description of the monitored resource type that might - // be used in documentation. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // Required. A set of labels used to describe instances of this monitored - // resource type. For example, an individual Google Cloud SQL database is - // identified by values for the labels `"database_id"` and `"zone"`. - Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` - // Optional. The launch stage of the monitored resource definition. 
- LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` -} - -func (x *MonitoredResourceDescriptor) Reset() { - *x = MonitoredResourceDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_monitored_resource_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MonitoredResourceDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MonitoredResourceDescriptor) ProtoMessage() {} - -func (x *MonitoredResourceDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_api_monitored_resource_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MonitoredResourceDescriptor.ProtoReflect.Descriptor instead. -func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { - return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{0} -} - -func (x *MonitoredResourceDescriptor) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *MonitoredResourceDescriptor) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *MonitoredResourceDescriptor) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *MonitoredResourceDescriptor) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { - if x != nil { - return x.Labels - } - return nil -} - -func (x *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { - if x != nil { - return x.LaunchStage - } - return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED -} - -// An object representing a resource that can be used for monitoring, logging, -// billing, or other purposes. Examples include virtual machine instances, -// databases, and storage devices such as disks. The `type` field identifies a -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's -// schema. Information in the `labels` field identifies the actual resource and -// its attributes according to the schema. For example, a particular Compute -// Engine VM instance could be represented by the following object, because the -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels -// `"instance_id"` and `"zone"`: -// -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -type MonitoredResource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The monitored resource type. This field must match - // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For - // example, the type of a Compute Engine VM instance is `gce_instance`. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Required. Values for all of the labels listed in the associated monitored - // resource descriptor. For example, Compute Engine VM instances use the - // labels `"project_id"`, `"instance_id"`, and `"zone"`. 
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *MonitoredResource) Reset() { - *x = MonitoredResource{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_monitored_resource_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MonitoredResource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MonitoredResource) ProtoMessage() {} - -func (x *MonitoredResource) ProtoReflect() protoreflect.Message { - mi := &file_google_api_monitored_resource_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MonitoredResource.ProtoReflect.Descriptor instead. -func (*MonitoredResource) Descriptor() ([]byte, []int) { - return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{1} -} - -func (x *MonitoredResource) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *MonitoredResource) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object. -// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to -// uniquely identify a monitored resource instance. There is some other useful -// auxiliary metadata. Monitoring and Logging use an ingestion -// pipeline to extract metadata for cloud resources of all types, and store -// the metadata in this message. -type MonitoredResourceMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Output only. Values for predefined system metadata labels. - // System labels are a kind of metadata extracted by Google, including - // "machine_image", "vpc", "subnet_id", - // "security_group", "name", etc. - // System label values can be only strings, Boolean values, or a list of - // strings. For example: - // - // { "name": "my-test-instance", - // "security_group": ["a", "b", "c"], - // "spot_instance": false } - SystemLabels *structpb.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` - // Output only. A map of user-defined metadata labels. 
- UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *MonitoredResourceMetadata) Reset() { - *x = MonitoredResourceMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_monitored_resource_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MonitoredResourceMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MonitoredResourceMetadata) ProtoMessage() {} - -func (x *MonitoredResourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_api_monitored_resource_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MonitoredResourceMetadata.ProtoReflect.Descriptor instead. -func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { - return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{2} -} - -func (x *MonitoredResourceMetadata) GetSystemLabels() *structpb.Struct { - if x != nil { - return x.SystemLabels - } - return nil -} - -func (x *MonitoredResourceMetadata) GetUserLabels() map[string]string { - if x != nil { - return x.UserLabels - } - return nil -} - -var File_google_api_monitored_resource_proto protoreflect.FileDescriptor - -var file_google_api_monitored_resource_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, - 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x01, 0x0a, 0x1b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, - 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, - 0x74, 0x61, 0x67, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x41, - 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, - 0x19, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0c, 0x73, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x56, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, - 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, - 0x79, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x42, 0x16, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 
0x64, - 0x72, 0x65, 0x73, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x72, 0x65, 0x73, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_api_monitored_resource_proto_rawDescOnce sync.Once - file_google_api_monitored_resource_proto_rawDescData = file_google_api_monitored_resource_proto_rawDesc -) - -func file_google_api_monitored_resource_proto_rawDescGZIP() []byte { - file_google_api_monitored_resource_proto_rawDescOnce.Do(func() { - file_google_api_monitored_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_monitored_resource_proto_rawDescData) - }) - return file_google_api_monitored_resource_proto_rawDescData -} - -var file_google_api_monitored_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_api_monitored_resource_proto_goTypes = []interface{}{ - (*MonitoredResourceDescriptor)(nil), // 0: google.api.MonitoredResourceDescriptor - (*MonitoredResource)(nil), // 1: google.api.MonitoredResource - (*MonitoredResourceMetadata)(nil), // 2: google.api.MonitoredResourceMetadata - nil, // 3: google.api.MonitoredResource.LabelsEntry - nil, // 4: google.api.MonitoredResourceMetadata.UserLabelsEntry - (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor - (api.LaunchStage)(0), // 6: google.api.LaunchStage - (*structpb.Struct)(nil), // 7: google.protobuf.Struct -} -var file_google_api_monitored_resource_proto_depIdxs = []int32{ - 5, // 0: google.api.MonitoredResourceDescriptor.labels:type_name -> google.api.LabelDescriptor - 6, // 1: google.api.MonitoredResourceDescriptor.launch_stage:type_name -> google.api.LaunchStage - 3, // 2: google.api.MonitoredResource.labels:type_name -> google.api.MonitoredResource.LabelsEntry - 7, // 3: google.api.MonitoredResourceMetadata.system_labels:type_name -> google.protobuf.Struct - 4, // 4: google.api.MonitoredResourceMetadata.user_labels:type_name -> google.api.MonitoredResourceMetadata.UserLabelsEntry - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_google_api_monitored_resource_proto_init() } -func file_google_api_monitored_resource_proto_init() { - if File_google_api_monitored_resource_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_monitored_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MonitoredResourceDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_monitored_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MonitoredResource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_monitored_resource_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MonitoredResourceMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_google_api_monitored_resource_proto_rawDesc, - NumEnums: 0, - NumMessages: 5, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_monitored_resource_proto_goTypes, - DependencyIndexes: file_google_api_monitored_resource_proto_depIdxs, - MessageInfos: file_google_api_monitored_resource_proto_msgTypes, - }.Build() - File_google_api_monitored_resource_proto = out.File - file_google_api_monitored_resource_proto_rawDesc = nil - file_google_api_monitored_resource_proto_goTypes = nil - file_google_api_monitored_resource_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go b/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go deleted file mode 100644 index d407e284e0..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go +++ /dev/null @@ -1,15458 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/container/v1/cluster_service.proto - -package container - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Possible types of sandboxes. -type SandboxConfig_Type int32 - -const ( - // Default value. This should not be used. - SandboxConfig_UNSPECIFIED SandboxConfig_Type = 0 - // Run sandbox using gvisor. - SandboxConfig_GVISOR SandboxConfig_Type = 1 -) - -// Enum value maps for SandboxConfig_Type. 
-var ( - SandboxConfig_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "GVISOR", - } - SandboxConfig_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "GVISOR": 1, - } -) - -func (x SandboxConfig_Type) Enum() *SandboxConfig_Type { - p := new(SandboxConfig_Type) - *p = x - return p -} - -func (x SandboxConfig_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SandboxConfig_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[0].Descriptor() -} - -func (SandboxConfig_Type) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[0] -} - -func (x SandboxConfig_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SandboxConfig_Type.Descriptor instead. -func (SandboxConfig_Type) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{2, 0} -} - -// Indicates whether to consume capacity from a reservation or not. -type ReservationAffinity_Type int32 - -const ( - // Default value. This should not be used. - ReservationAffinity_UNSPECIFIED ReservationAffinity_Type = 0 - // Do not consume from any reserved capacity. - ReservationAffinity_NO_RESERVATION ReservationAffinity_Type = 1 - // Consume any reservation available. - ReservationAffinity_ANY_RESERVATION ReservationAffinity_Type = 2 - // Must consume from a specific reservation. Must specify key value fields - // for specifying the reservations. - ReservationAffinity_SPECIFIC_RESERVATION ReservationAffinity_Type = 3 -) - -// Enum value maps for ReservationAffinity_Type. -var ( - ReservationAffinity_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "NO_RESERVATION", - 2: "ANY_RESERVATION", - 3: "SPECIFIC_RESERVATION", - } - ReservationAffinity_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "NO_RESERVATION": 1, - "ANY_RESERVATION": 2, - "SPECIFIC_RESERVATION": 3, - } -) - -func (x ReservationAffinity_Type) Enum() *ReservationAffinity_Type { - p := new(ReservationAffinity_Type) - *p = x - return p -} - -func (x ReservationAffinity_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReservationAffinity_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[1].Descriptor() -} - -func (ReservationAffinity_Type) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[1] -} - -func (x ReservationAffinity_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReservationAffinity_Type.Descriptor instead. -func (ReservationAffinity_Type) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{3, 0} -} - -// Possible values for Effect in taint. -type NodeTaint_Effect int32 - -const ( - // Not set - NodeTaint_EFFECT_UNSPECIFIED NodeTaint_Effect = 0 - // NoSchedule - NodeTaint_NO_SCHEDULE NodeTaint_Effect = 1 - // PreferNoSchedule - NodeTaint_PREFER_NO_SCHEDULE NodeTaint_Effect = 2 - // NoExecute - NodeTaint_NO_EXECUTE NodeTaint_Effect = 3 -) - -// Enum value maps for NodeTaint_Effect. 
-var ( - NodeTaint_Effect_name = map[int32]string{ - 0: "EFFECT_UNSPECIFIED", - 1: "NO_SCHEDULE", - 2: "PREFER_NO_SCHEDULE", - 3: "NO_EXECUTE", - } - NodeTaint_Effect_value = map[string]int32{ - "EFFECT_UNSPECIFIED": 0, - "NO_SCHEDULE": 1, - "PREFER_NO_SCHEDULE": 2, - "NO_EXECUTE": 3, - } -) - -func (x NodeTaint_Effect) Enum() *NodeTaint_Effect { - p := new(NodeTaint_Effect) - *p = x - return p -} - -func (x NodeTaint_Effect) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NodeTaint_Effect) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[2].Descriptor() -} - -func (NodeTaint_Effect) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[2] -} - -func (x NodeTaint_Effect) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NodeTaint_Effect.Descriptor instead. -func (NodeTaint_Effect) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{4, 0} -} - -// Load balancer type of ingress service of Cloud Run. -type CloudRunConfig_LoadBalancerType int32 - -const ( - // Load balancer type for Cloud Run is unspecified. - CloudRunConfig_LOAD_BALANCER_TYPE_UNSPECIFIED CloudRunConfig_LoadBalancerType = 0 - // Install external load balancer for Cloud Run. - CloudRunConfig_LOAD_BALANCER_TYPE_EXTERNAL CloudRunConfig_LoadBalancerType = 1 - // Install internal load balancer for Cloud Run. - CloudRunConfig_LOAD_BALANCER_TYPE_INTERNAL CloudRunConfig_LoadBalancerType = 2 -) - -// Enum value maps for CloudRunConfig_LoadBalancerType. -var ( - CloudRunConfig_LoadBalancerType_name = map[int32]string{ - 0: "LOAD_BALANCER_TYPE_UNSPECIFIED", - 1: "LOAD_BALANCER_TYPE_EXTERNAL", - 2: "LOAD_BALANCER_TYPE_INTERNAL", - } - CloudRunConfig_LoadBalancerType_value = map[string]int32{ - "LOAD_BALANCER_TYPE_UNSPECIFIED": 0, - "LOAD_BALANCER_TYPE_EXTERNAL": 1, - "LOAD_BALANCER_TYPE_INTERNAL": 2, - } -) - -func (x CloudRunConfig_LoadBalancerType) Enum() *CloudRunConfig_LoadBalancerType { - p := new(CloudRunConfig_LoadBalancerType) - *p = x - return p -} - -func (x CloudRunConfig_LoadBalancerType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CloudRunConfig_LoadBalancerType) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[3].Descriptor() -} - -func (CloudRunConfig_LoadBalancerType) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[3] -} - -func (x CloudRunConfig_LoadBalancerType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CloudRunConfig_LoadBalancerType.Descriptor instead. -func (CloudRunConfig_LoadBalancerType) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{16, 0} -} - -// Allowed Network Policy providers. -type NetworkPolicy_Provider int32 - -const ( - // Not set - NetworkPolicy_PROVIDER_UNSPECIFIED NetworkPolicy_Provider = 0 - // Tigera (Calico Felix). - NetworkPolicy_CALICO NetworkPolicy_Provider = 1 -) - -// Enum value maps for NetworkPolicy_Provider. 
-var ( - NetworkPolicy_Provider_name = map[int32]string{ - 0: "PROVIDER_UNSPECIFIED", - 1: "CALICO", - } - NetworkPolicy_Provider_value = map[string]int32{ - "PROVIDER_UNSPECIFIED": 0, - "CALICO": 1, - } -) - -func (x NetworkPolicy_Provider) Enum() *NetworkPolicy_Provider { - p := new(NetworkPolicy_Provider) - *p = x - return p -} - -func (x NetworkPolicy_Provider) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NetworkPolicy_Provider) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[4].Descriptor() -} - -func (NetworkPolicy_Provider) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[4] -} - -func (x NetworkPolicy_Provider) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NetworkPolicy_Provider.Descriptor instead. -func (NetworkPolicy_Provider) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{20, 0} -} - -// The current status of the cluster. -type Cluster_Status int32 - -const ( - // Not set. - Cluster_STATUS_UNSPECIFIED Cluster_Status = 0 - // The PROVISIONING state indicates the cluster is being created. - Cluster_PROVISIONING Cluster_Status = 1 - // The RUNNING state indicates the cluster has been created and is fully - // usable. - Cluster_RUNNING Cluster_Status = 2 - // The RECONCILING state indicates that some work is actively being done on - // the cluster, such as upgrading the master or node software. Details can - // be found in the `statusMessage` field. - Cluster_RECONCILING Cluster_Status = 3 - // The STOPPING state indicates the cluster is being deleted. - Cluster_STOPPING Cluster_Status = 4 - // The ERROR state indicates the cluster is unusable. It will be - // automatically deleted. Details can be found in the `statusMessage` field. - Cluster_ERROR Cluster_Status = 5 - // The DEGRADED state indicates the cluster requires user action to restore - // full functionality. Details can be found in the `statusMessage` field. - Cluster_DEGRADED Cluster_Status = 6 -) - -// Enum value maps for Cluster_Status. -var ( - Cluster_Status_name = map[int32]string{ - 0: "STATUS_UNSPECIFIED", - 1: "PROVISIONING", - 2: "RUNNING", - 3: "RECONCILING", - 4: "STOPPING", - 5: "ERROR", - 6: "DEGRADED", - } - Cluster_Status_value = map[string]int32{ - "STATUS_UNSPECIFIED": 0, - "PROVISIONING": 1, - "RUNNING": 2, - "RECONCILING": 3, - "STOPPING": 4, - "ERROR": 5, - "DEGRADED": 6, - } -) - -func (x Cluster_Status) Enum() *Cluster_Status { - p := new(Cluster_Status) - *p = x - return p -} - -func (x Cluster_Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Cluster_Status) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[5].Descriptor() -} - -func (Cluster_Status) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[5] -} - -func (x Cluster_Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Cluster_Status.Descriptor instead. -func (Cluster_Status) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{23, 0} -} - -// Current status of the operation. -type Operation_Status int32 - -const ( - // Not set. 
- Operation_STATUS_UNSPECIFIED Operation_Status = 0 - // The operation has been created. - Operation_PENDING Operation_Status = 1 - // The operation is currently running. - Operation_RUNNING Operation_Status = 2 - // The operation is done, either cancelled or completed. - Operation_DONE Operation_Status = 3 - // The operation is aborting. - Operation_ABORTING Operation_Status = 4 -) - -// Enum value maps for Operation_Status. -var ( - Operation_Status_name = map[int32]string{ - 0: "STATUS_UNSPECIFIED", - 1: "PENDING", - 2: "RUNNING", - 3: "DONE", - 4: "ABORTING", - } - Operation_Status_value = map[string]int32{ - "STATUS_UNSPECIFIED": 0, - "PENDING": 1, - "RUNNING": 2, - "DONE": 3, - "ABORTING": 4, - } -) - -func (x Operation_Status) Enum() *Operation_Status { - p := new(Operation_Status) - *p = x - return p -} - -func (x Operation_Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Operation_Status) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[6].Descriptor() -} - -func (Operation_Status) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[6] -} - -func (x Operation_Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Operation_Status.Descriptor instead. -func (Operation_Status) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{25, 0} -} - -// Operation type. -type Operation_Type int32 - -const ( - // Not set. - Operation_TYPE_UNSPECIFIED Operation_Type = 0 - // Cluster create. - Operation_CREATE_CLUSTER Operation_Type = 1 - // Cluster delete. - Operation_DELETE_CLUSTER Operation_Type = 2 - // A master upgrade. - Operation_UPGRADE_MASTER Operation_Type = 3 - // A node upgrade. - Operation_UPGRADE_NODES Operation_Type = 4 - // Cluster repair. - Operation_REPAIR_CLUSTER Operation_Type = 5 - // Cluster update. - Operation_UPDATE_CLUSTER Operation_Type = 6 - // Node pool create. - Operation_CREATE_NODE_POOL Operation_Type = 7 - // Node pool delete. - Operation_DELETE_NODE_POOL Operation_Type = 8 - // Set node pool management. - Operation_SET_NODE_POOL_MANAGEMENT Operation_Type = 9 - // Automatic node pool repair. - Operation_AUTO_REPAIR_NODES Operation_Type = 10 - // Automatic node upgrade. - Operation_AUTO_UPGRADE_NODES Operation_Type = 11 - // Set labels. - Operation_SET_LABELS Operation_Type = 12 - // Set/generate master auth materials - Operation_SET_MASTER_AUTH Operation_Type = 13 - // Set node pool size. - Operation_SET_NODE_POOL_SIZE Operation_Type = 14 - // Updates network policy for a cluster. - Operation_SET_NETWORK_POLICY Operation_Type = 15 - // Set the maintenance policy. - Operation_SET_MAINTENANCE_POLICY Operation_Type = 16 -) - -// Enum value maps for Operation_Type. 
-var ( - Operation_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "CREATE_CLUSTER", - 2: "DELETE_CLUSTER", - 3: "UPGRADE_MASTER", - 4: "UPGRADE_NODES", - 5: "REPAIR_CLUSTER", - 6: "UPDATE_CLUSTER", - 7: "CREATE_NODE_POOL", - 8: "DELETE_NODE_POOL", - 9: "SET_NODE_POOL_MANAGEMENT", - 10: "AUTO_REPAIR_NODES", - 11: "AUTO_UPGRADE_NODES", - 12: "SET_LABELS", - 13: "SET_MASTER_AUTH", - 14: "SET_NODE_POOL_SIZE", - 15: "SET_NETWORK_POLICY", - 16: "SET_MAINTENANCE_POLICY", - } - Operation_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "CREATE_CLUSTER": 1, - "DELETE_CLUSTER": 2, - "UPGRADE_MASTER": 3, - "UPGRADE_NODES": 4, - "REPAIR_CLUSTER": 5, - "UPDATE_CLUSTER": 6, - "CREATE_NODE_POOL": 7, - "DELETE_NODE_POOL": 8, - "SET_NODE_POOL_MANAGEMENT": 9, - "AUTO_REPAIR_NODES": 10, - "AUTO_UPGRADE_NODES": 11, - "SET_LABELS": 12, - "SET_MASTER_AUTH": 13, - "SET_NODE_POOL_SIZE": 14, - "SET_NETWORK_POLICY": 15, - "SET_MAINTENANCE_POLICY": 16, - } -) - -func (x Operation_Type) Enum() *Operation_Type { - p := new(Operation_Type) - *p = x - return p -} - -func (x Operation_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Operation_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[7].Descriptor() -} - -func (Operation_Type) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[7] -} - -func (x Operation_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Operation_Type.Descriptor instead. -func (Operation_Type) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{25, 1} -} - -// Operation type: what type update to perform. -type SetMasterAuthRequest_Action int32 - -const ( - // Operation is unknown and will error out. - SetMasterAuthRequest_UNKNOWN SetMasterAuthRequest_Action = 0 - // Set the password to a user generated value. - SetMasterAuthRequest_SET_PASSWORD SetMasterAuthRequest_Action = 1 - // Generate a new password and set it to that. - SetMasterAuthRequest_GENERATE_PASSWORD SetMasterAuthRequest_Action = 2 - // Set the username. If an empty username is provided, basic authentication - // is disabled for the cluster. If a non-empty username is provided, basic - // authentication is enabled, with either a provided password or a generated - // one. - SetMasterAuthRequest_SET_USERNAME SetMasterAuthRequest_Action = 3 -) - -// Enum value maps for SetMasterAuthRequest_Action. 
-var ( - SetMasterAuthRequest_Action_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET_PASSWORD", - 2: "GENERATE_PASSWORD", - 3: "SET_USERNAME", - } - SetMasterAuthRequest_Action_value = map[string]int32{ - "UNKNOWN": 0, - "SET_PASSWORD": 1, - "GENERATE_PASSWORD": 2, - "SET_USERNAME": 3, - } -) - -func (x SetMasterAuthRequest_Action) Enum() *SetMasterAuthRequest_Action { - p := new(SetMasterAuthRequest_Action) - *p = x - return p -} - -func (x SetMasterAuthRequest_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SetMasterAuthRequest_Action) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[8].Descriptor() -} - -func (SetMasterAuthRequest_Action) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[8] -} - -func (x SetMasterAuthRequest_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SetMasterAuthRequest_Action.Descriptor instead. -func (SetMasterAuthRequest_Action) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{37, 0} -} - -// The current status of the node pool instance. -type NodePool_Status int32 - -const ( - // Not set. - NodePool_STATUS_UNSPECIFIED NodePool_Status = 0 - // The PROVISIONING state indicates the node pool is being created. - NodePool_PROVISIONING NodePool_Status = 1 - // The RUNNING state indicates the node pool has been created - // and is fully usable. - NodePool_RUNNING NodePool_Status = 2 - // The RUNNING_WITH_ERROR state indicates the node pool has been created - // and is partially usable. Some error state has occurred and some - // functionality may be impaired. Customer may need to reissue a request - // or trigger a new update. - NodePool_RUNNING_WITH_ERROR NodePool_Status = 3 - // The RECONCILING state indicates that some work is actively being done on - // the node pool, such as upgrading node software. Details can - // be found in the `statusMessage` field. - NodePool_RECONCILING NodePool_Status = 4 - // The STOPPING state indicates the node pool is being deleted. - NodePool_STOPPING NodePool_Status = 5 - // The ERROR state indicates the node pool may be unusable. Details - // can be found in the `statusMessage` field. - NodePool_ERROR NodePool_Status = 6 -) - -// Enum value maps for NodePool_Status. -var ( - NodePool_Status_name = map[int32]string{ - 0: "STATUS_UNSPECIFIED", - 1: "PROVISIONING", - 2: "RUNNING", - 3: "RUNNING_WITH_ERROR", - 4: "RECONCILING", - 5: "STOPPING", - 6: "ERROR", - } - NodePool_Status_value = map[string]int32{ - "STATUS_UNSPECIFIED": 0, - "PROVISIONING": 1, - "RUNNING": 2, - "RUNNING_WITH_ERROR": 3, - "RECONCILING": 4, - "STOPPING": 5, - "ERROR": 6, - } -) - -func (x NodePool_Status) Enum() *NodePool_Status { - p := new(NodePool_Status) - *p = x - return p -} - -func (x NodePool_Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NodePool_Status) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[9].Descriptor() -} - -func (NodePool_Status) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[9] -} - -func (x NodePool_Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NodePool_Status.Descriptor instead. 
-func (NodePool_Status) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{51, 0} -} - -// Mode is the configuration for how to expose metadata to workloads running -// on the node. -type WorkloadMetadataConfig_Mode int32 - -const ( - // Not set. - WorkloadMetadataConfig_MODE_UNSPECIFIED WorkloadMetadataConfig_Mode = 0 - // Expose all Compute Engine metadata to pods. - WorkloadMetadataConfig_GCE_METADATA WorkloadMetadataConfig_Mode = 1 - // Run the GKE Metadata Server on this node. The GKE Metadata Server exposes - // a metadata API to workloads that is compatible with the V1 Compute - // Metadata APIs exposed by the Compute Engine and App Engine Metadata - // Servers. This feature can only be enabled if Workload Identity is enabled - // at the cluster level. - WorkloadMetadataConfig_GKE_METADATA WorkloadMetadataConfig_Mode = 2 -) - -// Enum value maps for WorkloadMetadataConfig_Mode. -var ( - WorkloadMetadataConfig_Mode_name = map[int32]string{ - 0: "MODE_UNSPECIFIED", - 1: "GCE_METADATA", - 2: "GKE_METADATA", - } - WorkloadMetadataConfig_Mode_value = map[string]int32{ - "MODE_UNSPECIFIED": 0, - "GCE_METADATA": 1, - "GKE_METADATA": 2, - } -) - -func (x WorkloadMetadataConfig_Mode) Enum() *WorkloadMetadataConfig_Mode { - p := new(WorkloadMetadataConfig_Mode) - *p = x - return p -} - -func (x WorkloadMetadataConfig_Mode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (WorkloadMetadataConfig_Mode) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[10].Descriptor() -} - -func (WorkloadMetadataConfig_Mode) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[10] -} - -func (x WorkloadMetadataConfig_Mode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use WorkloadMetadataConfig_Mode.Descriptor instead. -func (WorkloadMetadataConfig_Mode) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{72, 0} -} - -// Code for each condition -type StatusCondition_Code int32 - -const ( - // UNKNOWN indicates a generic condition. - StatusCondition_UNKNOWN StatusCondition_Code = 0 - // GCE_STOCKOUT indicates that Google Compute Engine resources are - // temporarily unavailable. - StatusCondition_GCE_STOCKOUT StatusCondition_Code = 1 - // GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot - // service account. - StatusCondition_GKE_SERVICE_ACCOUNT_DELETED StatusCondition_Code = 2 - // Google Compute Engine quota was exceeded. - StatusCondition_GCE_QUOTA_EXCEEDED StatusCondition_Code = 3 - // Cluster state was manually changed by an SRE due to a system logic error. - StatusCondition_SET_BY_OPERATOR StatusCondition_Code = 4 - // Unable to perform an encrypt operation against the CloudKMS key used for - // etcd level encryption. - // More codes TBA - StatusCondition_CLOUD_KMS_KEY_ERROR StatusCondition_Code = 7 -) - -// Enum value maps for StatusCondition_Code. 
-var ( - StatusCondition_Code_name = map[int32]string{ - 0: "UNKNOWN", - 1: "GCE_STOCKOUT", - 2: "GKE_SERVICE_ACCOUNT_DELETED", - 3: "GCE_QUOTA_EXCEEDED", - 4: "SET_BY_OPERATOR", - 7: "CLOUD_KMS_KEY_ERROR", - } - StatusCondition_Code_value = map[string]int32{ - "UNKNOWN": 0, - "GCE_STOCKOUT": 1, - "GKE_SERVICE_ACCOUNT_DELETED": 2, - "GCE_QUOTA_EXCEEDED": 3, - "SET_BY_OPERATOR": 4, - "CLOUD_KMS_KEY_ERROR": 7, - } -) - -func (x StatusCondition_Code) Enum() *StatusCondition_Code { - p := new(StatusCondition_Code) - *p = x - return p -} - -func (x StatusCondition_Code) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (StatusCondition_Code) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[11].Descriptor() -} - -func (StatusCondition_Code) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[11] -} - -func (x StatusCondition_Code) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use StatusCondition_Code.Descriptor instead. -func (StatusCondition_Code) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{75, 0} -} - -// Possible values for 'channel'. -type ReleaseChannel_Channel int32 - -const ( - // No channel specified. - ReleaseChannel_UNSPECIFIED ReleaseChannel_Channel = 0 - // RAPID channel is offered on an early access basis for customers who want - // to test new releases. - // - // WARNING: Versions available in the RAPID Channel may be subject to - // unresolved issues with no known workaround and are not subject to any - // SLAs. - ReleaseChannel_RAPID ReleaseChannel_Channel = 1 - // Clusters subscribed to REGULAR receive versions that are considered GA - // quality. REGULAR is intended for production users who want to take - // advantage of new features. - ReleaseChannel_REGULAR ReleaseChannel_Channel = 2 - // Clusters subscribed to STABLE receive versions that are known to be - // stable and reliable in production. - ReleaseChannel_STABLE ReleaseChannel_Channel = 3 -) - -// Enum value maps for ReleaseChannel_Channel. -var ( - ReleaseChannel_Channel_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "RAPID", - 2: "REGULAR", - 3: "STABLE", - } - ReleaseChannel_Channel_value = map[string]int32{ - "UNSPECIFIED": 0, - "RAPID": 1, - "REGULAR": 2, - "STABLE": 3, - } -) - -func (x ReleaseChannel_Channel) Enum() *ReleaseChannel_Channel { - p := new(ReleaseChannel_Channel) - *p = x - return p -} - -func (x ReleaseChannel_Channel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReleaseChannel_Channel) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[12].Descriptor() -} - -func (ReleaseChannel_Channel) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[12] -} - -func (x ReleaseChannel_Channel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReleaseChannel_Channel.Descriptor instead. -func (ReleaseChannel_Channel) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{82, 0} -} - -// State of etcd encryption. 
-type DatabaseEncryption_State int32 - -const ( - // Should never be set - DatabaseEncryption_UNKNOWN DatabaseEncryption_State = 0 - // Secrets in etcd are encrypted. - DatabaseEncryption_ENCRYPTED DatabaseEncryption_State = 1 - // Secrets in etcd are stored in plain text (at etcd level) - this is - // unrelated to Compute Engine level full disk encryption. - DatabaseEncryption_DECRYPTED DatabaseEncryption_State = 2 -) - -// Enum value maps for DatabaseEncryption_State. -var ( - DatabaseEncryption_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ENCRYPTED", - 2: "DECRYPTED", - } - DatabaseEncryption_State_value = map[string]int32{ - "UNKNOWN": 0, - "ENCRYPTED": 1, - "DECRYPTED": 2, - } -) - -func (x DatabaseEncryption_State) Enum() *DatabaseEncryption_State { - p := new(DatabaseEncryption_State) - *p = x - return p -} - -func (x DatabaseEncryption_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (DatabaseEncryption_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[13].Descriptor() -} - -func (DatabaseEncryption_State) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[13] -} - -func (x DatabaseEncryption_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use DatabaseEncryption_State.Descriptor instead. -func (DatabaseEncryption_State) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{86, 0} -} - -// Status shows the current usage of a secondary IP range. -type UsableSubnetworkSecondaryRange_Status int32 - -const ( - // UNKNOWN is the zero value of the Status enum. It's not a valid status. - UsableSubnetworkSecondaryRange_UNKNOWN UsableSubnetworkSecondaryRange_Status = 0 - // UNUSED denotes that this range is unclaimed by any cluster. - UsableSubnetworkSecondaryRange_UNUSED UsableSubnetworkSecondaryRange_Status = 1 - // IN_USE_SERVICE denotes that this range is claimed by a cluster for - // services. It cannot be used for other clusters. - UsableSubnetworkSecondaryRange_IN_USE_SERVICE UsableSubnetworkSecondaryRange_Status = 2 - // IN_USE_SHAREABLE_POD denotes this range was created by the network admin - // and is currently claimed by a cluster for pods. It can only be used by - // other clusters as a pod range. - UsableSubnetworkSecondaryRange_IN_USE_SHAREABLE_POD UsableSubnetworkSecondaryRange_Status = 3 - // IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed - // for pods. It cannot be used for other clusters. - UsableSubnetworkSecondaryRange_IN_USE_MANAGED_POD UsableSubnetworkSecondaryRange_Status = 4 -) - -// Enum value maps for UsableSubnetworkSecondaryRange_Status. 
-var ( - UsableSubnetworkSecondaryRange_Status_name = map[int32]string{ - 0: "UNKNOWN", - 1: "UNUSED", - 2: "IN_USE_SERVICE", - 3: "IN_USE_SHAREABLE_POD", - 4: "IN_USE_MANAGED_POD", - } - UsableSubnetworkSecondaryRange_Status_value = map[string]int32{ - "UNKNOWN": 0, - "UNUSED": 1, - "IN_USE_SERVICE": 2, - "IN_USE_SHAREABLE_POD": 3, - "IN_USE_MANAGED_POD": 4, - } -) - -func (x UsableSubnetworkSecondaryRange_Status) Enum() *UsableSubnetworkSecondaryRange_Status { - p := new(UsableSubnetworkSecondaryRange_Status) - *p = x - return p -} - -func (x UsableSubnetworkSecondaryRange_Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (UsableSubnetworkSecondaryRange_Status) Descriptor() protoreflect.EnumDescriptor { - return file_google_container_v1_cluster_service_proto_enumTypes[14].Descriptor() -} - -func (UsableSubnetworkSecondaryRange_Status) Type() protoreflect.EnumType { - return &file_google_container_v1_cluster_service_proto_enumTypes[14] -} - -func (x UsableSubnetworkSecondaryRange_Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use UsableSubnetworkSecondaryRange_Status.Descriptor instead. -func (UsableSubnetworkSecondaryRange_Status) EnumDescriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{89, 0} -} - -// Parameters that describe the nodes in a cluster. -type NodeConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of a Google Compute Engine [machine - // type](https://cloud.google.com/compute/docs/machine-types) - // - // If unspecified, the default machine type is `e2-medium`. - MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3" json:"machine_type,omitempty"` - // Size of the disk attached to each node, specified in GB. - // The smallest allowed disk size is 10GB. - // - // If unspecified, the default disk size is 100GB. - DiskSizeGb int32 `protobuf:"varint,2,opt,name=disk_size_gb,json=diskSizeGb,proto3" json:"disk_size_gb,omitempty"` - // The set of Google API scopes to be made available on all of the - // node VMs under the "default" service account. - // - // The following scopes are recommended, but not required, and by default are - // not included: - // - // * `https://www.googleapis.com/auth/compute` is required for mounting - // persistent storage on your nodes. - // * `https://www.googleapis.com/auth/devstorage.read_only` is required for - // communicating with **gcr.io** - // (the [Google Container - // Registry](https://cloud.google.com/container-registry/)). - // - // If unspecified, no scopes are added, unless Cloud Logging or Cloud - // Monitoring are enabled, in which case their required scopes will be added. - OauthScopes []string `protobuf:"bytes,3,rep,name=oauth_scopes,json=oauthScopes,proto3" json:"oauth_scopes,omitempty"` - // The Google Cloud Platform Service Account to be used by the node VMs. - // Specify the email address of the Service Account; otherwise, if no Service - // Account is specified, the "default" service account is used. - ServiceAccount string `protobuf:"bytes,9,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"` - // The metadata key/value pairs assigned to instances in the cluster. - // - // Keys must conform to the regexp `[a-zA-Z0-9-_]+` and be less than 128 bytes - // in length. 
These are reflected as part of a URL in the metadata server. - // Additionally, to avoid ambiguity, keys must not conflict with any other - // metadata keys for the project or be one of the reserved keys: - // - "cluster-location" - // - "cluster-name" - // - "cluster-uid" - // - "configure-sh" - // - "containerd-configure-sh" - // - "enable-os-login" - // - "gci-ensure-gke-docker" - // - "gci-metrics-enabled" - // - "gci-update-strategy" - // - "instance-template" - // - "kube-env" - // - "startup-script" - // - "user-data" - // - "disable-address-manager" - // - "windows-startup-script-ps1" - // - "common-psm1" - // - "k8s-node-setup-psm1" - // - "install-ssh-psm1" - // - "user-profile-psm1" - // - // The following keys are reserved for Windows nodes: - // - "serial-port-logging-enable" - // - // Values are free-form strings, and only have meaning as interpreted by - // the image running in the instance. The only restriction placed on them is - // that each value's size must be less than or equal to 32 KB. - // - // The total size of all keys and values must be less than 512 KB. - Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The image type to use for this node. Note that for a given image type, - // the latest version of it will be used. - ImageType string `protobuf:"bytes,5,opt,name=image_type,json=imageType,proto3" json:"image_type,omitempty"` - // The map of Kubernetes labels (key/value pairs) to be applied to each node. - // These will added in addition to any default label(s) that - // Kubernetes may apply to the node. - // In case of conflict in label keys, the applied set may differ depending on - // the Kubernetes version -- it's best to assume the behavior is undefined - // and conflicts should be avoided. - // For more information, including usage and the valid values, see: - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The number of local SSD disks to be attached to the node. - // - // The limit for this value is dependent upon the maximum number of - // disks available on a machine per zone. See: - // https://cloud.google.com/compute/docs/disks/local-ssd - // for more information. - LocalSsdCount int32 `protobuf:"varint,7,opt,name=local_ssd_count,json=localSsdCount,proto3" json:"local_ssd_count,omitempty"` - // The list of instance tags applied to all nodes. Tags are used to identify - // valid sources or targets for network firewalls and are specified by - // the client during cluster or node pool creation. Each tag within the list - // must comply with RFC1035. - Tags []string `protobuf:"bytes,8,rep,name=tags,proto3" json:"tags,omitempty"` - // Whether the nodes are created as preemptible VM instances. See: - // https://cloud.google.com/compute/docs/instances/preemptible for more - // information about preemptible VM instances. - Preemptible bool `protobuf:"varint,10,opt,name=preemptible,proto3" json:"preemptible,omitempty"` - // A list of hardware accelerators to be attached to each node. - // See https://cloud.google.com/compute/docs/gpus for more information about - // support for GPUs. 
- Accelerators []*AcceleratorConfig `protobuf:"bytes,11,rep,name=accelerators,proto3" json:"accelerators,omitempty"` - // Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or - // 'pd-balanced') - // - // If unspecified, the default disk type is 'pd-standard' - DiskType string `protobuf:"bytes,12,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - // Minimum CPU platform to be used by this instance. The instance may be - // scheduled on the specified or newer CPU platform. Applicable values are the - // friendly names of CPU platforms, such as - // `minCpuPlatform: "Intel Haswell"` or - // `minCpuPlatform: "Intel Sandy Bridge"`. For more - // information, read [how to specify min CPU - // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) - MinCpuPlatform string `protobuf:"bytes,13,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"` - // The workload metadata configuration for this node. - WorkloadMetadataConfig *WorkloadMetadataConfig `protobuf:"bytes,14,opt,name=workload_metadata_config,json=workloadMetadataConfig,proto3" json:"workload_metadata_config,omitempty"` - // List of kubernetes taints to be applied to each node. - // - // For more information, including usage and the valid values, see: - // https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - Taints []*NodeTaint `protobuf:"bytes,15,rep,name=taints,proto3" json:"taints,omitempty"` - // Sandbox configuration for this node. - SandboxConfig *SandboxConfig `protobuf:"bytes,17,opt,name=sandbox_config,json=sandboxConfig,proto3" json:"sandbox_config,omitempty"` - // Setting this field will assign instances of this - // pool to run on the specified node group. This is useful for running - // workloads on [sole tenant - // nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). - NodeGroup string `protobuf:"bytes,18,opt,name=node_group,json=nodeGroup,proto3" json:"node_group,omitempty"` - // The optional reservation affinity. Setting this field will apply - // the specified [Zonal Compute - // Reservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) - // to this node pool. - ReservationAffinity *ReservationAffinity `protobuf:"bytes,19,opt,name=reservation_affinity,json=reservationAffinity,proto3" json:"reservation_affinity,omitempty"` - // Shielded Instance options. - ShieldedInstanceConfig *ShieldedInstanceConfig `protobuf:"bytes,20,opt,name=shielded_instance_config,json=shieldedInstanceConfig,proto3" json:"shielded_instance_config,omitempty"` - // - // The Customer Managed Encryption Key used to encrypt the boot disk attached - // to each node in the node pool. This should be of the form - // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. 
- // For more information about protecting resources with Cloud KMS Keys please - // see: - // https://cloud.google.com/compute/docs/disks/customer-managed-encryption - BootDiskKmsKey string `protobuf:"bytes,23,opt,name=boot_disk_kms_key,json=bootDiskKmsKey,proto3" json:"boot_disk_kms_key,omitempty"` -} - -func (x *NodeConfig) Reset() { - *x = NodeConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeConfig) ProtoMessage() {} - -func (x *NodeConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeConfig.ProtoReflect.Descriptor instead. -func (*NodeConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{0} -} - -func (x *NodeConfig) GetMachineType() string { - if x != nil { - return x.MachineType - } - return "" -} - -func (x *NodeConfig) GetDiskSizeGb() int32 { - if x != nil { - return x.DiskSizeGb - } - return 0 -} - -func (x *NodeConfig) GetOauthScopes() []string { - if x != nil { - return x.OauthScopes - } - return nil -} - -func (x *NodeConfig) GetServiceAccount() string { - if x != nil { - return x.ServiceAccount - } - return "" -} - -func (x *NodeConfig) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *NodeConfig) GetImageType() string { - if x != nil { - return x.ImageType - } - return "" -} - -func (x *NodeConfig) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *NodeConfig) GetLocalSsdCount() int32 { - if x != nil { - return x.LocalSsdCount - } - return 0 -} - -func (x *NodeConfig) GetTags() []string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *NodeConfig) GetPreemptible() bool { - if x != nil { - return x.Preemptible - } - return false -} - -func (x *NodeConfig) GetAccelerators() []*AcceleratorConfig { - if x != nil { - return x.Accelerators - } - return nil -} - -func (x *NodeConfig) GetDiskType() string { - if x != nil { - return x.DiskType - } - return "" -} - -func (x *NodeConfig) GetMinCpuPlatform() string { - if x != nil { - return x.MinCpuPlatform - } - return "" -} - -func (x *NodeConfig) GetWorkloadMetadataConfig() *WorkloadMetadataConfig { - if x != nil { - return x.WorkloadMetadataConfig - } - return nil -} - -func (x *NodeConfig) GetTaints() []*NodeTaint { - if x != nil { - return x.Taints - } - return nil -} - -func (x *NodeConfig) GetSandboxConfig() *SandboxConfig { - if x != nil { - return x.SandboxConfig - } - return nil -} - -func (x *NodeConfig) GetNodeGroup() string { - if x != nil { - return x.NodeGroup - } - return "" -} - -func (x *NodeConfig) GetReservationAffinity() *ReservationAffinity { - if x != nil { - return x.ReservationAffinity - } - return nil -} - -func (x *NodeConfig) GetShieldedInstanceConfig() *ShieldedInstanceConfig { - if x != nil { - return x.ShieldedInstanceConfig - } - return nil -} - -func (x *NodeConfig) GetBootDiskKmsKey() string { - if x != nil { - return x.BootDiskKmsKey - } - return "" -} - -// A set of Shielded Instance options. 
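Editor's note: the block below is an illustrative sketch, not part of the generated file or of this diff. It shows how a caller might populate the NodeConfig message defined above and use its nil-safe getters; the `containerpb` import path is an assumption about where these bindings are published.

package main

import (
	"fmt"

	containerpb "google.golang.org/genproto/googleapis/container/v1" // assumed import path for the generated bindings
)

func main() {
	// Populate a few of the NodeConfig fields documented above.
	nodeCfg := &containerpb.NodeConfig{
		MachineType: "e2-medium", // the documented default machine type
		DiskSizeGb:  100,         // the documented default disk size in GB
		OauthScopes: []string{
			"https://www.googleapis.com/auth/devstorage.read_only",
		},
		Labels:      map[string]string{"env": "dev"},
		Preemptible: false,
	}
	// The generated getters check for a nil receiver, so they are safe to call
	// even when the message pointer may be nil.
	fmt.Println(nodeCfg.GetMachineType(), nodeCfg.GetDiskSizeGb())
}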
-type ShieldedInstanceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Defines whether the instance has Secure Boot enabled. - // - // Secure Boot helps ensure that the system only runs authentic software by - // verifying the digital signature of all boot components, and halting the - // boot process if signature verification fails. - EnableSecureBoot bool `protobuf:"varint,1,opt,name=enable_secure_boot,json=enableSecureBoot,proto3" json:"enable_secure_boot,omitempty"` - // Defines whether the instance has integrity monitoring enabled. - // - // Enables monitoring and attestation of the boot integrity of the instance. - // The attestation is performed against the integrity policy baseline. This - // baseline is initially derived from the implicitly trusted boot image when - // the instance is created. - EnableIntegrityMonitoring bool `protobuf:"varint,2,opt,name=enable_integrity_monitoring,json=enableIntegrityMonitoring,proto3" json:"enable_integrity_monitoring,omitempty"` -} - -func (x *ShieldedInstanceConfig) Reset() { - *x = ShieldedInstanceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShieldedInstanceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShieldedInstanceConfig) ProtoMessage() {} - -func (x *ShieldedInstanceConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShieldedInstanceConfig.ProtoReflect.Descriptor instead. -func (*ShieldedInstanceConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ShieldedInstanceConfig) GetEnableSecureBoot() bool { - if x != nil { - return x.EnableSecureBoot - } - return false -} - -func (x *ShieldedInstanceConfig) GetEnableIntegrityMonitoring() bool { - if x != nil { - return x.EnableIntegrityMonitoring - } - return false -} - -// SandboxConfig contains configurations of the sandbox to use for the node. -type SandboxConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Type of the sandbox to use for the node. - Type SandboxConfig_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.container.v1.SandboxConfig_Type" json:"type,omitempty"` -} - -func (x *SandboxConfig) Reset() { - *x = SandboxConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SandboxConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SandboxConfig) ProtoMessage() {} - -func (x *SandboxConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SandboxConfig.ProtoReflect.Descriptor instead. 
-func (*SandboxConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{2} -} - -func (x *SandboxConfig) GetType() SandboxConfig_Type { - if x != nil { - return x.Type - } - return SandboxConfig_UNSPECIFIED -} - -// [ReservationAffinity](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) -// is the configuration of desired reservation which instances could take -// capacity from. -type ReservationAffinity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Corresponds to the type of reservation consumption. - ConsumeReservationType ReservationAffinity_Type `protobuf:"varint,1,opt,name=consume_reservation_type,json=consumeReservationType,proto3,enum=google.container.v1.ReservationAffinity_Type" json:"consume_reservation_type,omitempty"` - // Corresponds to the label key of a reservation resource. To target a - // SPECIFIC_RESERVATION by name, specify "googleapis.com/reservation-name" as - // the key and specify the name of your reservation as its value. - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - // Corresponds to the label value(s) of reservation resource(s). - Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` -} - -func (x *ReservationAffinity) Reset() { - *x = ReservationAffinity{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReservationAffinity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReservationAffinity) ProtoMessage() {} - -func (x *ReservationAffinity) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReservationAffinity.ProtoReflect.Descriptor instead. -func (*ReservationAffinity) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{3} -} - -func (x *ReservationAffinity) GetConsumeReservationType() ReservationAffinity_Type { - if x != nil { - return x.ConsumeReservationType - } - return ReservationAffinity_UNSPECIFIED -} - -func (x *ReservationAffinity) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *ReservationAffinity) GetValues() []string { - if x != nil { - return x.Values - } - return nil -} - -// Kubernetes taint is comprised of three fields: key, value, and effect. Effect -// can only be one of three types: NoSchedule, PreferNoSchedule or NoExecute. -// -// See -// [here](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration) -// for more information, including usage and the valid values. -type NodeTaint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key for taint. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Value for taint. - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // Effect for taint. 
- Effect NodeTaint_Effect `protobuf:"varint,3,opt,name=effect,proto3,enum=google.container.v1.NodeTaint_Effect" json:"effect,omitempty"` -} - -func (x *NodeTaint) Reset() { - *x = NodeTaint{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeTaint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeTaint) ProtoMessage() {} - -func (x *NodeTaint) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeTaint.ProtoReflect.Descriptor instead. -func (*NodeTaint) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{4} -} - -func (x *NodeTaint) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *NodeTaint) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -func (x *NodeTaint) GetEffect() NodeTaint_Effect { - if x != nil { - return x.Effect - } - return NodeTaint_EFFECT_UNSPECIFIED -} - -// The authentication information for accessing the master endpoint. -// Authentication can be done using HTTP basic auth or using client -// certificates. -type MasterAuth struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The username to use for HTTP basic authentication to the master endpoint. - // For clusters v1.6.0 and later, basic authentication can be disabled by - // leaving username unspecified (or setting it to the empty string). - // - // Warning: basic authentication is deprecated, and will be removed in GKE - // control plane versions 1.19 and newer. For a list of recommended - // authentication methods, see: - // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication - // - // Deprecated: Do not use. - Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` - // The password to use for HTTP basic authentication to the master endpoint. - // Because the master endpoint is open to the Internet, you should create a - // strong password. If a password is provided for cluster creation, username - // must be non-empty. - // - // Warning: basic authentication is deprecated, and will be removed in GKE - // control plane versions 1.19 and newer. For a list of recommended - // authentication methods, see: - // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication - // - // Deprecated: Do not use. - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // Configuration for client certificate authentication on the cluster. For - // clusters before v1.12, if no configuration is specified, a client - // certificate is issued. - ClientCertificateConfig *ClientCertificateConfig `protobuf:"bytes,3,opt,name=client_certificate_config,json=clientCertificateConfig,proto3" json:"client_certificate_config,omitempty"` - // [Output only] Base64-encoded public certificate that is the root of - // trust for the cluster. 
- ClusterCaCertificate string `protobuf:"bytes,100,opt,name=cluster_ca_certificate,json=clusterCaCertificate,proto3" json:"cluster_ca_certificate,omitempty"` - // [Output only] Base64-encoded public certificate used by clients to - // authenticate to the cluster endpoint. - ClientCertificate string `protobuf:"bytes,101,opt,name=client_certificate,json=clientCertificate,proto3" json:"client_certificate,omitempty"` - // [Output only] Base64-encoded private key used by clients to authenticate - // to the cluster endpoint. - ClientKey string `protobuf:"bytes,102,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"` -} - -func (x *MasterAuth) Reset() { - *x = MasterAuth{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MasterAuth) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MasterAuth) ProtoMessage() {} - -func (x *MasterAuth) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MasterAuth.ProtoReflect.Descriptor instead. -func (*MasterAuth) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{5} -} - -// Deprecated: Do not use. -func (x *MasterAuth) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -// Deprecated: Do not use. -func (x *MasterAuth) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *MasterAuth) GetClientCertificateConfig() *ClientCertificateConfig { - if x != nil { - return x.ClientCertificateConfig - } - return nil -} - -func (x *MasterAuth) GetClusterCaCertificate() string { - if x != nil { - return x.ClusterCaCertificate - } - return "" -} - -func (x *MasterAuth) GetClientCertificate() string { - if x != nil { - return x.ClientCertificate - } - return "" -} - -func (x *MasterAuth) GetClientKey() string { - if x != nil { - return x.ClientKey - } - return "" -} - -// Configuration for client certificates on the cluster. -type ClientCertificateConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Issue a client certificate. 
- IssueClientCertificate bool `protobuf:"varint,1,opt,name=issue_client_certificate,json=issueClientCertificate,proto3" json:"issue_client_certificate,omitempty"` -} - -func (x *ClientCertificateConfig) Reset() { - *x = ClientCertificateConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientCertificateConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientCertificateConfig) ProtoMessage() {} - -func (x *ClientCertificateConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientCertificateConfig.ProtoReflect.Descriptor instead. -func (*ClientCertificateConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{6} -} - -func (x *ClientCertificateConfig) GetIssueClientCertificate() bool { - if x != nil { - return x.IssueClientCertificate - } - return false -} - -// Configuration for the addons that can be automatically spun up in the -// cluster, enabling additional functionality. -type AddonsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Configuration for the HTTP (L7) load balancing controller addon, which - // makes it easy to set up HTTP load balancers for services in a cluster. - HttpLoadBalancing *HttpLoadBalancing `protobuf:"bytes,1,opt,name=http_load_balancing,json=httpLoadBalancing,proto3" json:"http_load_balancing,omitempty"` - // Configuration for the horizontal pod autoscaling feature, which - // increases or decreases the number of replica pods a replication controller - // has based on the resource usage of the existing pods. - HorizontalPodAutoscaling *HorizontalPodAutoscaling `protobuf:"bytes,2,opt,name=horizontal_pod_autoscaling,json=horizontalPodAutoscaling,proto3" json:"horizontal_pod_autoscaling,omitempty"` - // Configuration for the Kubernetes Dashboard. - // This addon is deprecated, and will be disabled in 1.15. It is recommended - // to use the Cloud Console to manage and monitor your Kubernetes clusters, - // workloads and applications. For more information, see: - // https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards - // - // Deprecated: Do not use. - KubernetesDashboard *KubernetesDashboard `protobuf:"bytes,3,opt,name=kubernetes_dashboard,json=kubernetesDashboard,proto3" json:"kubernetes_dashboard,omitempty"` - // Configuration for NetworkPolicy. This only tracks whether the addon - // is enabled or not on the Master, it does not track whether network policy - // is enabled for the nodes. - NetworkPolicyConfig *NetworkPolicyConfig `protobuf:"bytes,4,opt,name=network_policy_config,json=networkPolicyConfig,proto3" json:"network_policy_config,omitempty"` - // Configuration for the Cloud Run addon, which allows the user to use a - // managed Knative service. 
- CloudRunConfig *CloudRunConfig `protobuf:"bytes,7,opt,name=cloud_run_config,json=cloudRunConfig,proto3" json:"cloud_run_config,omitempty"` - // Configuration for NodeLocalDNS, a dns cache running on cluster nodes - DnsCacheConfig *DnsCacheConfig `protobuf:"bytes,8,opt,name=dns_cache_config,json=dnsCacheConfig,proto3" json:"dns_cache_config,omitempty"` - // Configuration for the ConfigConnector add-on, a Kubernetes - // extension to manage hosted GCP services through the Kubernetes API - ConfigConnectorConfig *ConfigConnectorConfig `protobuf:"bytes,10,opt,name=config_connector_config,json=configConnectorConfig,proto3" json:"config_connector_config,omitempty"` -} - -func (x *AddonsConfig) Reset() { - *x = AddonsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddonsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddonsConfig) ProtoMessage() {} - -func (x *AddonsConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddonsConfig.ProtoReflect.Descriptor instead. -func (*AddonsConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{7} -} - -func (x *AddonsConfig) GetHttpLoadBalancing() *HttpLoadBalancing { - if x != nil { - return x.HttpLoadBalancing - } - return nil -} - -func (x *AddonsConfig) GetHorizontalPodAutoscaling() *HorizontalPodAutoscaling { - if x != nil { - return x.HorizontalPodAutoscaling - } - return nil -} - -// Deprecated: Do not use. -func (x *AddonsConfig) GetKubernetesDashboard() *KubernetesDashboard { - if x != nil { - return x.KubernetesDashboard - } - return nil -} - -func (x *AddonsConfig) GetNetworkPolicyConfig() *NetworkPolicyConfig { - if x != nil { - return x.NetworkPolicyConfig - } - return nil -} - -func (x *AddonsConfig) GetCloudRunConfig() *CloudRunConfig { - if x != nil { - return x.CloudRunConfig - } - return nil -} - -func (x *AddonsConfig) GetDnsCacheConfig() *DnsCacheConfig { - if x != nil { - return x.DnsCacheConfig - } - return nil -} - -func (x *AddonsConfig) GetConfigConnectorConfig() *ConfigConnectorConfig { - if x != nil { - return x.ConfigConnectorConfig - } - return nil -} - -// Configuration options for the HTTP (L7) load balancing controller addon, -// which makes it easy to set up HTTP load balancers for services in a cluster. -type HttpLoadBalancing struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether the HTTP Load Balancing controller is enabled in the cluster. - // When enabled, it runs a small pod in the cluster that manages the load - // balancers. 
- Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *HttpLoadBalancing) Reset() { - *x = HttpLoadBalancing{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpLoadBalancing) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpLoadBalancing) ProtoMessage() {} - -func (x *HttpLoadBalancing) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpLoadBalancing.ProtoReflect.Descriptor instead. -func (*HttpLoadBalancing) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{8} -} - -func (x *HttpLoadBalancing) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// Configuration options for the horizontal pod autoscaling feature, which -// increases or decreases the number of replica pods a replication controller -// has based on the resource usage of the existing pods. -type HorizontalPodAutoscaling struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. - // When enabled, it ensures that metrics are collected into Stackdriver - // Monitoring. - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *HorizontalPodAutoscaling) Reset() { - *x = HorizontalPodAutoscaling{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HorizontalPodAutoscaling) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HorizontalPodAutoscaling) ProtoMessage() {} - -func (x *HorizontalPodAutoscaling) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HorizontalPodAutoscaling.ProtoReflect.Descriptor instead. -func (*HorizontalPodAutoscaling) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{9} -} - -func (x *HorizontalPodAutoscaling) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// Configuration for the Kubernetes Dashboard. -type KubernetesDashboard struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether the Kubernetes Dashboard is enabled for this cluster. 
- Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *KubernetesDashboard) Reset() { - *x = KubernetesDashboard{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *KubernetesDashboard) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KubernetesDashboard) ProtoMessage() {} - -func (x *KubernetesDashboard) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KubernetesDashboard.ProtoReflect.Descriptor instead. -func (*KubernetesDashboard) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{10} -} - -func (x *KubernetesDashboard) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// Configuration for NetworkPolicy. This only tracks whether the addon -// is enabled or not on the Master, it does not track whether network policy -// is enabled for the nodes. -type NetworkPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether NetworkPolicy is enabled for this cluster. - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *NetworkPolicyConfig) Reset() { - *x = NetworkPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NetworkPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NetworkPolicyConfig) ProtoMessage() {} - -func (x *NetworkPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NetworkPolicyConfig.ProtoReflect.Descriptor instead. -func (*NetworkPolicyConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{11} -} - -func (x *NetworkPolicyConfig) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// Configuration for NodeLocal DNSCache -type DnsCacheConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether NodeLocal DNSCache is enabled for this cluster. 
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *DnsCacheConfig) Reset() { - *x = DnsCacheConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DnsCacheConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DnsCacheConfig) ProtoMessage() {} - -func (x *DnsCacheConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DnsCacheConfig.ProtoReflect.Descriptor instead. -func (*DnsCacheConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{12} -} - -func (x *DnsCacheConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Configuration for controlling master global access settings. -type PrivateClusterMasterGlobalAccessConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whenever master is accessible globally or not. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *PrivateClusterMasterGlobalAccessConfig) Reset() { - *x = PrivateClusterMasterGlobalAccessConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PrivateClusterMasterGlobalAccessConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PrivateClusterMasterGlobalAccessConfig) ProtoMessage() {} - -func (x *PrivateClusterMasterGlobalAccessConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PrivateClusterMasterGlobalAccessConfig.ProtoReflect.Descriptor instead. -func (*PrivateClusterMasterGlobalAccessConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{13} -} - -func (x *PrivateClusterMasterGlobalAccessConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Configuration options for private clusters. -type PrivateClusterConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether nodes have internal IP addresses only. If enabled, all nodes are - // given only RFC 1918 private addresses and communicate with the master via - // private networking. - EnablePrivateNodes bool `protobuf:"varint,1,opt,name=enable_private_nodes,json=enablePrivateNodes,proto3" json:"enable_private_nodes,omitempty"` - // Whether the master's internal IP address is used as the cluster endpoint. 
- EnablePrivateEndpoint bool `protobuf:"varint,2,opt,name=enable_private_endpoint,json=enablePrivateEndpoint,proto3" json:"enable_private_endpoint,omitempty"` - // The IP range in CIDR notation to use for the hosted master network. This - // range will be used for assigning internal IP addresses to the master or - // set of masters, as well as the ILB VIP. This range must not overlap with - // any other ranges in use within the cluster's network. - MasterIpv4CidrBlock string `protobuf:"bytes,3,opt,name=master_ipv4_cidr_block,json=masterIpv4CidrBlock,proto3" json:"master_ipv4_cidr_block,omitempty"` - // Output only. The internal IP address of this cluster's master endpoint. - PrivateEndpoint string `protobuf:"bytes,4,opt,name=private_endpoint,json=privateEndpoint,proto3" json:"private_endpoint,omitempty"` - // Output only. The external IP address of this cluster's master endpoint. - PublicEndpoint string `protobuf:"bytes,5,opt,name=public_endpoint,json=publicEndpoint,proto3" json:"public_endpoint,omitempty"` - // Output only. The peering name in the customer VPC used by this cluster. - PeeringName string `protobuf:"bytes,7,opt,name=peering_name,json=peeringName,proto3" json:"peering_name,omitempty"` - // Controls master global access settings. - MasterGlobalAccessConfig *PrivateClusterMasterGlobalAccessConfig `protobuf:"bytes,8,opt,name=master_global_access_config,json=masterGlobalAccessConfig,proto3" json:"master_global_access_config,omitempty"` -} - -func (x *PrivateClusterConfig) Reset() { - *x = PrivateClusterConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PrivateClusterConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PrivateClusterConfig) ProtoMessage() {} - -func (x *PrivateClusterConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PrivateClusterConfig.ProtoReflect.Descriptor instead. -func (*PrivateClusterConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{14} -} - -func (x *PrivateClusterConfig) GetEnablePrivateNodes() bool { - if x != nil { - return x.EnablePrivateNodes - } - return false -} - -func (x *PrivateClusterConfig) GetEnablePrivateEndpoint() bool { - if x != nil { - return x.EnablePrivateEndpoint - } - return false -} - -func (x *PrivateClusterConfig) GetMasterIpv4CidrBlock() string { - if x != nil { - return x.MasterIpv4CidrBlock - } - return "" -} - -func (x *PrivateClusterConfig) GetPrivateEndpoint() string { - if x != nil { - return x.PrivateEndpoint - } - return "" -} - -func (x *PrivateClusterConfig) GetPublicEndpoint() string { - if x != nil { - return x.PublicEndpoint - } - return "" -} - -func (x *PrivateClusterConfig) GetPeeringName() string { - if x != nil { - return x.PeeringName - } - return "" -} - -func (x *PrivateClusterConfig) GetMasterGlobalAccessConfig() *PrivateClusterMasterGlobalAccessConfig { - if x != nil { - return x.MasterGlobalAccessConfig - } - return nil -} - -// Configuration for returning group information from authenticators. 
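Editor's note: another illustrative sketch, not part of the generated file or of this diff. It demonstrates the nil-safe getter pattern that the generated code above follows, using PrivateClusterConfig; the `containerpb` import path is assumed.

package main

import (
	"fmt"

	containerpb "google.golang.org/genproto/googleapis/container/v1" // assumed import path for the generated bindings
)

// describePrivateCluster summarizes a PrivateClusterConfig. Because the
// generated getters return zero values on a nil receiver, no explicit nil
// check is required before calling them.
func describePrivateCluster(cfg *containerpb.PrivateClusterConfig) string {
	if !cfg.GetEnablePrivateNodes() {
		return "public nodes"
	}
	return fmt.Sprintf("private nodes, master CIDR %s, private endpoint %s",
		cfg.GetMasterIpv4CidrBlock(), cfg.GetPrivateEndpoint())
}

func main() {
	fmt.Println(describePrivateCluster(nil)) // prints "public nodes"
	fmt.Println(describePrivateCluster(&containerpb.PrivateClusterConfig{
		EnablePrivateNodes:  true,
		MasterIpv4CidrBlock: "172.16.0.0/28",
	}))
}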
-type AuthenticatorGroupsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether this cluster should return group membership lookups - // during authentication using a group of security groups. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - // The name of the security group-of-groups to be used. Only relevant - // if enabled = true. - SecurityGroup string `protobuf:"bytes,2,opt,name=security_group,json=securityGroup,proto3" json:"security_group,omitempty"` -} - -func (x *AuthenticatorGroupsConfig) Reset() { - *x = AuthenticatorGroupsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthenticatorGroupsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthenticatorGroupsConfig) ProtoMessage() {} - -func (x *AuthenticatorGroupsConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthenticatorGroupsConfig.ProtoReflect.Descriptor instead. -func (*AuthenticatorGroupsConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{15} -} - -func (x *AuthenticatorGroupsConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *AuthenticatorGroupsConfig) GetSecurityGroup() string { - if x != nil { - return x.SecurityGroup - } - return "" -} - -// Configuration options for the Cloud Run feature. -type CloudRunConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether Cloud Run addon is enabled for this cluster. - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` - // Which load balancer type is installed for Cloud Run. - LoadBalancerType CloudRunConfig_LoadBalancerType `protobuf:"varint,3,opt,name=load_balancer_type,json=loadBalancerType,proto3,enum=google.container.v1.CloudRunConfig_LoadBalancerType" json:"load_balancer_type,omitempty"` -} - -func (x *CloudRunConfig) Reset() { - *x = CloudRunConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CloudRunConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloudRunConfig) ProtoMessage() {} - -func (x *CloudRunConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloudRunConfig.ProtoReflect.Descriptor instead. 
-func (*CloudRunConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{16} -} - -func (x *CloudRunConfig) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -func (x *CloudRunConfig) GetLoadBalancerType() CloudRunConfig_LoadBalancerType { - if x != nil { - return x.LoadBalancerType - } - return CloudRunConfig_LOAD_BALANCER_TYPE_UNSPECIFIED -} - -// Configuration options for the Config Connector add-on. -type ConfigConnectorConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether Cloud Connector is enabled for this cluster. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *ConfigConnectorConfig) Reset() { - *x = ConfigConnectorConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConfigConnectorConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConfigConnectorConfig) ProtoMessage() {} - -func (x *ConfigConnectorConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConfigConnectorConfig.ProtoReflect.Descriptor instead. -func (*ConfigConnectorConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{17} -} - -func (x *ConfigConnectorConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Configuration options for the master authorized networks feature. Enabled -// master authorized networks will disallow all external traffic to access -// Kubernetes master through HTTPS except traffic from the given CIDR blocks, -// Google Compute Engine Public IPs and Google Prod IPs. -type MasterAuthorizedNetworksConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether or not master authorized networks is enabled. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - // cidr_blocks define up to 50 external networks that could access - // Kubernetes master through HTTPS. 
- CidrBlocks []*MasterAuthorizedNetworksConfig_CidrBlock `protobuf:"bytes,2,rep,name=cidr_blocks,json=cidrBlocks,proto3" json:"cidr_blocks,omitempty"` -} - -func (x *MasterAuthorizedNetworksConfig) Reset() { - *x = MasterAuthorizedNetworksConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MasterAuthorizedNetworksConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MasterAuthorizedNetworksConfig) ProtoMessage() {} - -func (x *MasterAuthorizedNetworksConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MasterAuthorizedNetworksConfig.ProtoReflect.Descriptor instead. -func (*MasterAuthorizedNetworksConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{18} -} - -func (x *MasterAuthorizedNetworksConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *MasterAuthorizedNetworksConfig) GetCidrBlocks() []*MasterAuthorizedNetworksConfig_CidrBlock { - if x != nil { - return x.CidrBlocks - } - return nil -} - -// Configuration for the legacy Attribute Based Access Control authorization -// mode. -type LegacyAbac struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether the ABAC authorizer is enabled for this cluster. When enabled, - // identities in the system, including service accounts, nodes, and - // controllers, will have statically granted permissions beyond those - // provided by the RBAC configuration or IAM. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *LegacyAbac) Reset() { - *x = LegacyAbac{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LegacyAbac) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LegacyAbac) ProtoMessage() {} - -func (x *LegacyAbac) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LegacyAbac.ProtoReflect.Descriptor instead. -func (*LegacyAbac) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{19} -} - -func (x *LegacyAbac) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Configuration options for the NetworkPolicy feature. -// https://kubernetes.io/docs/concepts/services-networking/networkpolicies/ -type NetworkPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The selected network policy provider. 
- Provider NetworkPolicy_Provider `protobuf:"varint,1,opt,name=provider,proto3,enum=google.container.v1.NetworkPolicy_Provider" json:"provider,omitempty"` - // Whether network policy is enabled on the cluster. - Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *NetworkPolicy) Reset() { - *x = NetworkPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NetworkPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NetworkPolicy) ProtoMessage() {} - -func (x *NetworkPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NetworkPolicy.ProtoReflect.Descriptor instead. -func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{20} -} - -func (x *NetworkPolicy) GetProvider() NetworkPolicy_Provider { - if x != nil { - return x.Provider - } - return NetworkPolicy_PROVIDER_UNSPECIFIED -} - -func (x *NetworkPolicy) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Configuration for Binary Authorization. -type BinaryAuthorization struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Enable Binary Authorization for this cluster. If enabled, all container - // images will be validated by Binary Authorization. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *BinaryAuthorization) Reset() { - *x = BinaryAuthorization{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BinaryAuthorization) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BinaryAuthorization) ProtoMessage() {} - -func (x *BinaryAuthorization) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BinaryAuthorization.ProtoReflect.Descriptor instead. -func (*BinaryAuthorization) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{21} -} - -func (x *BinaryAuthorization) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Configuration for controlling how IPs are allocated in the cluster. -type IPAllocationPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether alias IPs will be used for pod IPs in the cluster. - // This is used in conjunction with use_routes. It cannot - // be true if use_routes is true. 
If both use_ip_aliases and use_routes are - // false, then the server picks the default IP allocation mode - UseIpAliases bool `protobuf:"varint,1,opt,name=use_ip_aliases,json=useIpAliases,proto3" json:"use_ip_aliases,omitempty"` - // Whether a new subnetwork will be created automatically for the cluster. - // - // This field is only applicable when `use_ip_aliases` is true. - CreateSubnetwork bool `protobuf:"varint,2,opt,name=create_subnetwork,json=createSubnetwork,proto3" json:"create_subnetwork,omitempty"` - // A custom subnetwork name to be used if `create_subnetwork` is true. If - // this field is empty, then an automatic name will be chosen for the new - // subnetwork. - SubnetworkName string `protobuf:"bytes,3,opt,name=subnetwork_name,json=subnetworkName,proto3" json:"subnetwork_name,omitempty"` - // This field is deprecated, use cluster_ipv4_cidr_block. - // - // Deprecated: Do not use. - ClusterIpv4Cidr string `protobuf:"bytes,4,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr,proto3" json:"cluster_ipv4_cidr,omitempty"` - // This field is deprecated, use node_ipv4_cidr_block. - // - // Deprecated: Do not use. - NodeIpv4Cidr string `protobuf:"bytes,5,opt,name=node_ipv4_cidr,json=nodeIpv4Cidr,proto3" json:"node_ipv4_cidr,omitempty"` - // This field is deprecated, use services_ipv4_cidr_block. - // - // Deprecated: Do not use. - ServicesIpv4Cidr string `protobuf:"bytes,6,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr,proto3" json:"services_ipv4_cidr,omitempty"` - // The name of the secondary range to be used for the cluster CIDR - // block. The secondary range will be used for pod IP - // addresses. This must be an existing secondary range associated - // with the cluster subnetwork. - // - // This field is only applicable with use_ip_aliases is true and - // create_subnetwork is false. - ClusterSecondaryRangeName string `protobuf:"bytes,7,opt,name=cluster_secondary_range_name,json=clusterSecondaryRangeName,proto3" json:"cluster_secondary_range_name,omitempty"` - // The name of the secondary range to be used as for the services - // CIDR block. The secondary range will be used for service - // ClusterIPs. This must be an existing secondary range associated - // with the cluster subnetwork. - // - // This field is only applicable with use_ip_aliases is true and - // create_subnetwork is false. - ServicesSecondaryRangeName string `protobuf:"bytes,8,opt,name=services_secondary_range_name,json=servicesSecondaryRangeName,proto3" json:"services_secondary_range_name,omitempty"` - // The IP address range for the cluster pod IPs. If this field is set, then - // `cluster.cluster_ipv4_cidr` must be left blank. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a specific - // netmask. - // - // Set to a - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range - // to use. - ClusterIpv4CidrBlock string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr_block,json=clusterIpv4CidrBlock,proto3" json:"cluster_ipv4_cidr_block,omitempty"` - // The IP address range of the instance IPs in this cluster. - // - // This is applicable only if `create_subnetwork` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. 
`/14`) to have a range chosen with a specific - // netmask. - // - // Set to a - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range - // to use. - NodeIpv4CidrBlock string `protobuf:"bytes,10,opt,name=node_ipv4_cidr_block,json=nodeIpv4CidrBlock,proto3" json:"node_ipv4_cidr_block,omitempty"` - // The IP address range of the services IPs in this cluster. If blank, a range - // will be automatically chosen with the default size. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // Set to blank to have a range chosen with the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a specific - // netmask. - // - // Set to a - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range - // to use. - ServicesIpv4CidrBlock string `protobuf:"bytes,11,opt,name=services_ipv4_cidr_block,json=servicesIpv4CidrBlock,proto3" json:"services_ipv4_cidr_block,omitempty"` - // The IP address range of the Cloud TPUs in this cluster. If unspecified, a - // range will be automatically chosen with the default size. - // - // This field is only applicable when `use_ip_aliases` is true. - // - // If unspecified, the range will use the default size. - // - // Set to /netmask (e.g. `/14`) to have a range chosen with a specific - // netmask. - // - // Set to a - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. - // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range - // to use. - TpuIpv4CidrBlock string `protobuf:"bytes,13,opt,name=tpu_ipv4_cidr_block,json=tpuIpv4CidrBlock,proto3" json:"tpu_ipv4_cidr_block,omitempty"` - // Whether routes will be used for pod IPs in the cluster. - // This is used in conjunction with use_ip_aliases. It cannot be true if - // use_ip_aliases is true. If both use_ip_aliases and use_routes are false, - // then the server picks the default IP allocation mode - UseRoutes bool `protobuf:"varint,15,opt,name=use_routes,json=useRoutes,proto3" json:"use_routes,omitempty"` -} - -func (x *IPAllocationPolicy) Reset() { - *x = IPAllocationPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IPAllocationPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IPAllocationPolicy) ProtoMessage() {} - -func (x *IPAllocationPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IPAllocationPolicy.ProtoReflect.Descriptor instead. 
-func (*IPAllocationPolicy) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{22} -} - -func (x *IPAllocationPolicy) GetUseIpAliases() bool { - if x != nil { - return x.UseIpAliases - } - return false -} - -func (x *IPAllocationPolicy) GetCreateSubnetwork() bool { - if x != nil { - return x.CreateSubnetwork - } - return false -} - -func (x *IPAllocationPolicy) GetSubnetworkName() string { - if x != nil { - return x.SubnetworkName - } - return "" -} - -// Deprecated: Do not use. -func (x *IPAllocationPolicy) GetClusterIpv4Cidr() string { - if x != nil { - return x.ClusterIpv4Cidr - } - return "" -} - -// Deprecated: Do not use. -func (x *IPAllocationPolicy) GetNodeIpv4Cidr() string { - if x != nil { - return x.NodeIpv4Cidr - } - return "" -} - -// Deprecated: Do not use. -func (x *IPAllocationPolicy) GetServicesIpv4Cidr() string { - if x != nil { - return x.ServicesIpv4Cidr - } - return "" -} - -func (x *IPAllocationPolicy) GetClusterSecondaryRangeName() string { - if x != nil { - return x.ClusterSecondaryRangeName - } - return "" -} - -func (x *IPAllocationPolicy) GetServicesSecondaryRangeName() string { - if x != nil { - return x.ServicesSecondaryRangeName - } - return "" -} - -func (x *IPAllocationPolicy) GetClusterIpv4CidrBlock() string { - if x != nil { - return x.ClusterIpv4CidrBlock - } - return "" -} - -func (x *IPAllocationPolicy) GetNodeIpv4CidrBlock() string { - if x != nil { - return x.NodeIpv4CidrBlock - } - return "" -} - -func (x *IPAllocationPolicy) GetServicesIpv4CidrBlock() string { - if x != nil { - return x.ServicesIpv4CidrBlock - } - return "" -} - -func (x *IPAllocationPolicy) GetTpuIpv4CidrBlock() string { - if x != nil { - return x.TpuIpv4CidrBlock - } - return "" -} - -func (x *IPAllocationPolicy) GetUseRoutes() bool { - if x != nil { - return x.UseRoutes - } - return false -} - -// A Google Kubernetes Engine cluster. -type Cluster struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of this cluster. The name must be unique within this project - // and location (e.g. zone or region), and can be up to 40 characters with - // the following restrictions: - // - // * Lowercase letters, numbers, and hyphens only. - // * Must start with a letter. - // * Must end with a number or a letter. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // An optional description of this cluster. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // The number of nodes to create in this cluster. You must ensure that your - // Compute Engine [resource quota](https://cloud.google.com/compute/quotas) - // is sufficient for this number of instances. You must also have available - // firewall and routes quota. - // For requests, this field should only be used in lieu of a - // "node_pool" object, since this configuration (along with the - // "node_config") will be used to create a "NodePool" object with an - // auto-generated name. Do not use this and a node_pool at the same time. - // - // This field is deprecated, use node_pool.initial_node_count instead. - // - // Deprecated: Do not use. - InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount,proto3" json:"initial_node_count,omitempty"` - // Parameters used in creating the cluster's nodes. 
- // For requests, this field should only be used in lieu of a - // "node_pool" object, since this configuration (along with the - // "initial_node_count") will be used to create a "NodePool" object with an - // auto-generated name. Do not use this and a node_pool at the same time. - // For responses, this field will be populated with the node configuration of - // the first node pool. (For configuration of each node pool, see - // `node_pool.config`) - // - // If unspecified, the defaults are used. - // This field is deprecated, use node_pool.config instead. - // - // Deprecated: Do not use. - NodeConfig *NodeConfig `protobuf:"bytes,4,opt,name=node_config,json=nodeConfig,proto3" json:"node_config,omitempty"` - // The authentication information for accessing the master endpoint. - // If unspecified, the defaults are used: - // For clusters before v1.12, if master_auth is unspecified, `username` will - // be set to "admin", a random password will be generated, and a client - // certificate will be issued. - MasterAuth *MasterAuth `protobuf:"bytes,5,opt,name=master_auth,json=masterAuth,proto3" json:"master_auth,omitempty"` - // The logging service the cluster should use to write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. - LoggingService string `protobuf:"bytes,6,opt,name=logging_service,json=loggingService,proto3" json:"logging_service,omitempty"` - // The monitoring service the cluster should use to write metrics. - // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions. - MonitoringService string `protobuf:"bytes,7,opt,name=monitoring_service,json=monitoringService,proto3" json:"monitoring_service,omitempty"` - // The name of the Google Compute Engine - // [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) - // to which the cluster is connected. If left unspecified, the `default` - // network will be used. - Network string `protobuf:"bytes,8,opt,name=network,proto3" json:"network,omitempty"` - // The IP address range of the container pods in this cluster, in - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `10.96.0.0/14`). Leave blank to have - // one automatically chosen or specify a `/14` block in `10.0.0.0/8`. - ClusterIpv4Cidr string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr,proto3" json:"cluster_ipv4_cidr,omitempty"` - // Configurations for the various addons available to run in the cluster. 
- AddonsConfig *AddonsConfig `protobuf:"bytes,10,opt,name=addons_config,json=addonsConfig,proto3" json:"addons_config,omitempty"` - // The name of the Google Compute Engine - // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which - // the cluster is connected. - Subnetwork string `protobuf:"bytes,11,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"` - // The node pools associated with this cluster. - // This field should not be set if "node_config" or "initial_node_count" are - // specified. - NodePools []*NodePool `protobuf:"bytes,12,rep,name=node_pools,json=nodePools,proto3" json:"node_pools,omitempty"` - // The list of Google Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster's nodes should be located. - // - // This field provides a default value if - // [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) - // are not specified during node pool creation. - // - // Warning: changing cluster locations will update the - // [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) - // of all node pools and will result in nodes being added and/or removed. - Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"` - // Kubernetes alpha features are enabled on this cluster. This includes alpha - // API groups (e.g. v1alpha1) and features that may not be production ready in - // the kubernetes version of the master and nodes. - // The cluster has no SLA for uptime and master/node upgrades are disabled. - // Alpha enabled clusters are automatically deleted thirty days after - // creation. - EnableKubernetesAlpha bool `protobuf:"varint,14,opt,name=enable_kubernetes_alpha,json=enableKubernetesAlpha,proto3" json:"enable_kubernetes_alpha,omitempty"` - // The resource labels for the cluster to use to annotate any related - // Google Compute Engine resources. - ResourceLabels map[string]string `protobuf:"bytes,15,rep,name=resource_labels,json=resourceLabels,proto3" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The fingerprint of the set of labels for this cluster. - LabelFingerprint string `protobuf:"bytes,16,opt,name=label_fingerprint,json=labelFingerprint,proto3" json:"label_fingerprint,omitempty"` - // Configuration for the legacy ABAC authorization mode. - LegacyAbac *LegacyAbac `protobuf:"bytes,18,opt,name=legacy_abac,json=legacyAbac,proto3" json:"legacy_abac,omitempty"` - // Configuration options for the NetworkPolicy feature. - NetworkPolicy *NetworkPolicy `protobuf:"bytes,19,opt,name=network_policy,json=networkPolicy,proto3" json:"network_policy,omitempty"` - // Configuration for cluster IP allocation. - IpAllocationPolicy *IPAllocationPolicy `protobuf:"bytes,20,opt,name=ip_allocation_policy,json=ipAllocationPolicy,proto3" json:"ip_allocation_policy,omitempty"` - // The configuration options for master authorized networks feature. - MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,22,opt,name=master_authorized_networks_config,json=masterAuthorizedNetworksConfig,proto3" json:"master_authorized_networks_config,omitempty"` - // Configure the maintenance policy for this cluster. 
- MaintenancePolicy *MaintenancePolicy `protobuf:"bytes,23,opt,name=maintenance_policy,json=maintenancePolicy,proto3" json:"maintenance_policy,omitempty"` - // Configuration for Binary Authorization. - BinaryAuthorization *BinaryAuthorization `protobuf:"bytes,24,opt,name=binary_authorization,json=binaryAuthorization,proto3" json:"binary_authorization,omitempty"` - // Cluster-level autoscaling configuration. - Autoscaling *ClusterAutoscaling `protobuf:"bytes,26,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"` - // Configuration for cluster networking. - NetworkConfig *NetworkConfig `protobuf:"bytes,27,opt,name=network_config,json=networkConfig,proto3" json:"network_config,omitempty"` - // The default constraint on the maximum number of pods that can be run - // simultaneously on a node in the node pool of this cluster. Only honored - // if cluster created with IP Alias support. - DefaultMaxPodsConstraint *MaxPodsConstraint `protobuf:"bytes,30,opt,name=default_max_pods_constraint,json=defaultMaxPodsConstraint,proto3" json:"default_max_pods_constraint,omitempty"` - // Configuration for exporting resource usages. Resource usage export is - // disabled when this config is unspecified. - ResourceUsageExportConfig *ResourceUsageExportConfig `protobuf:"bytes,33,opt,name=resource_usage_export_config,json=resourceUsageExportConfig,proto3" json:"resource_usage_export_config,omitempty"` - // Configuration controlling RBAC group membership information. - AuthenticatorGroupsConfig *AuthenticatorGroupsConfig `protobuf:"bytes,34,opt,name=authenticator_groups_config,json=authenticatorGroupsConfig,proto3" json:"authenticator_groups_config,omitempty"` - // Configuration for private cluster. - PrivateClusterConfig *PrivateClusterConfig `protobuf:"bytes,37,opt,name=private_cluster_config,json=privateClusterConfig,proto3" json:"private_cluster_config,omitempty"` - // Configuration of etcd encryption. - DatabaseEncryption *DatabaseEncryption `protobuf:"bytes,38,opt,name=database_encryption,json=databaseEncryption,proto3" json:"database_encryption,omitempty"` - // Cluster-level Vertical Pod Autoscaling configuration. - VerticalPodAutoscaling *VerticalPodAutoscaling `protobuf:"bytes,39,opt,name=vertical_pod_autoscaling,json=verticalPodAutoscaling,proto3" json:"vertical_pod_autoscaling,omitempty"` - // Shielded Nodes configuration. - ShieldedNodes *ShieldedNodes `protobuf:"bytes,40,opt,name=shielded_nodes,json=shieldedNodes,proto3" json:"shielded_nodes,omitempty"` - // Release channel configuration. - ReleaseChannel *ReleaseChannel `protobuf:"bytes,41,opt,name=release_channel,json=releaseChannel,proto3" json:"release_channel,omitempty"` - // Configuration for the use of Kubernetes Service Accounts in GCP IAM - // policies. - WorkloadIdentityConfig *WorkloadIdentityConfig `protobuf:"bytes,43,opt,name=workload_identity_config,json=workloadIdentityConfig,proto3" json:"workload_identity_config,omitempty"` - // [Output only] Server-defined URL for the resource. - SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"` - // [Output only] The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field is deprecated, use location instead. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,101,opt,name=zone,proto3" json:"zone,omitempty"` - // [Output only] The IP address of this cluster's master endpoint. 
- // The endpoint can be accessed from the internet at - // `https://username:password@endpoint/`. - // - // See the `masterAuth` property of this resource for username and - // password information. - Endpoint string `protobuf:"bytes,102,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - // The initial Kubernetes version for this cluster. Valid versions are those - // found in validMasterVersions returned by getServerConfig. The version can - // be upgraded over time; such upgrades are reflected in - // currentMasterVersion and currentNodeVersion. - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "","-": picks the default Kubernetes version - InitialClusterVersion string `protobuf:"bytes,103,opt,name=initial_cluster_version,json=initialClusterVersion,proto3" json:"initial_cluster_version,omitempty"` - // [Output only] The current software version of the master endpoint. - CurrentMasterVersion string `protobuf:"bytes,104,opt,name=current_master_version,json=currentMasterVersion,proto3" json:"current_master_version,omitempty"` - // [Output only] Deprecated, use - // [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) - // instead. The current version of the node software components. If they are - // currently at multiple versions because they're in the process of being - // upgraded, this reflects the minimum version of all nodes. - // - // Deprecated: Do not use. - CurrentNodeVersion string `protobuf:"bytes,105,opt,name=current_node_version,json=currentNodeVersion,proto3" json:"current_node_version,omitempty"` - // [Output only] The time the cluster was created, in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. - CreateTime string `protobuf:"bytes,106,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // [Output only] The current status of this cluster. - Status Cluster_Status `protobuf:"varint,107,opt,name=status,proto3,enum=google.container.v1.Cluster_Status" json:"status,omitempty"` - // [Output only] Deprecated. Use conditions instead. - // Additional information about the current status of this - // cluster, if available. - // - // Deprecated: Do not use. - StatusMessage string `protobuf:"bytes,108,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // [Output only] The size of the address space on each node for hosting - // containers. This is provisioned from within the `container_ipv4_cidr` - // range. This field will only be set when cluster is in route-based network - // mode. - NodeIpv4CidrSize int32 `protobuf:"varint,109,opt,name=node_ipv4_cidr_size,json=nodeIpv4CidrSize,proto3" json:"node_ipv4_cidr_size,omitempty"` - // [Output only] The IP address range of the Kubernetes services in - // this cluster, in - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `1.2.3.4/29`). Service addresses are - // typically put in the last `/16` from the container CIDR. - ServicesIpv4Cidr string `protobuf:"bytes,110,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr,proto3" json:"services_ipv4_cidr,omitempty"` - // Deprecated. 
Use node_pools.instance_group_urls. - // - // Deprecated: Do not use. - InstanceGroupUrls []string `protobuf:"bytes,111,rep,name=instance_group_urls,json=instanceGroupUrls,proto3" json:"instance_group_urls,omitempty"` - // [Output only] The number of nodes currently in the cluster. Deprecated. - // Call Kubernetes API directly to retrieve node information. - // - // Deprecated: Do not use. - CurrentNodeCount int32 `protobuf:"varint,112,opt,name=current_node_count,json=currentNodeCount,proto3" json:"current_node_count,omitempty"` - // [Output only] The time the cluster will be automatically - // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. - ExpireTime string `protobuf:"bytes,113,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` - // [Output only] The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) - // or - // [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) - // in which the cluster resides. - Location string `protobuf:"bytes,114,opt,name=location,proto3" json:"location,omitempty"` - // Enable the ability to use Cloud TPUs in this cluster. - EnableTpu bool `protobuf:"varint,115,opt,name=enable_tpu,json=enableTpu,proto3" json:"enable_tpu,omitempty"` - // [Output only] The IP address range of the Cloud TPUs in this cluster, in - // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // notation (e.g. `1.2.3.4/29`). - TpuIpv4CidrBlock string `protobuf:"bytes,116,opt,name=tpu_ipv4_cidr_block,json=tpuIpv4CidrBlock,proto3" json:"tpu_ipv4_cidr_block,omitempty"` - // Which conditions caused the current cluster state. - Conditions []*StatusCondition `protobuf:"bytes,118,rep,name=conditions,proto3" json:"conditions,omitempty"` -} - -func (x *Cluster) Reset() { - *x = Cluster{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Cluster) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Cluster) ProtoMessage() {} - -func (x *Cluster) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Cluster.ProtoReflect.Descriptor instead. -func (*Cluster) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{23} -} - -func (x *Cluster) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Cluster) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -// Deprecated: Do not use. -func (x *Cluster) GetInitialNodeCount() int32 { - if x != nil { - return x.InitialNodeCount - } - return 0 -} - -// Deprecated: Do not use. 
-func (x *Cluster) GetNodeConfig() *NodeConfig { - if x != nil { - return x.NodeConfig - } - return nil -} - -func (x *Cluster) GetMasterAuth() *MasterAuth { - if x != nil { - return x.MasterAuth - } - return nil -} - -func (x *Cluster) GetLoggingService() string { - if x != nil { - return x.LoggingService - } - return "" -} - -func (x *Cluster) GetMonitoringService() string { - if x != nil { - return x.MonitoringService - } - return "" -} - -func (x *Cluster) GetNetwork() string { - if x != nil { - return x.Network - } - return "" -} - -func (x *Cluster) GetClusterIpv4Cidr() string { - if x != nil { - return x.ClusterIpv4Cidr - } - return "" -} - -func (x *Cluster) GetAddonsConfig() *AddonsConfig { - if x != nil { - return x.AddonsConfig - } - return nil -} - -func (x *Cluster) GetSubnetwork() string { - if x != nil { - return x.Subnetwork - } - return "" -} - -func (x *Cluster) GetNodePools() []*NodePool { - if x != nil { - return x.NodePools - } - return nil -} - -func (x *Cluster) GetLocations() []string { - if x != nil { - return x.Locations - } - return nil -} - -func (x *Cluster) GetEnableKubernetesAlpha() bool { - if x != nil { - return x.EnableKubernetesAlpha - } - return false -} - -func (x *Cluster) GetResourceLabels() map[string]string { - if x != nil { - return x.ResourceLabels - } - return nil -} - -func (x *Cluster) GetLabelFingerprint() string { - if x != nil { - return x.LabelFingerprint - } - return "" -} - -func (x *Cluster) GetLegacyAbac() *LegacyAbac { - if x != nil { - return x.LegacyAbac - } - return nil -} - -func (x *Cluster) GetNetworkPolicy() *NetworkPolicy { - if x != nil { - return x.NetworkPolicy - } - return nil -} - -func (x *Cluster) GetIpAllocationPolicy() *IPAllocationPolicy { - if x != nil { - return x.IpAllocationPolicy - } - return nil -} - -func (x *Cluster) GetMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig { - if x != nil { - return x.MasterAuthorizedNetworksConfig - } - return nil -} - -func (x *Cluster) GetMaintenancePolicy() *MaintenancePolicy { - if x != nil { - return x.MaintenancePolicy - } - return nil -} - -func (x *Cluster) GetBinaryAuthorization() *BinaryAuthorization { - if x != nil { - return x.BinaryAuthorization - } - return nil -} - -func (x *Cluster) GetAutoscaling() *ClusterAutoscaling { - if x != nil { - return x.Autoscaling - } - return nil -} - -func (x *Cluster) GetNetworkConfig() *NetworkConfig { - if x != nil { - return x.NetworkConfig - } - return nil -} - -func (x *Cluster) GetDefaultMaxPodsConstraint() *MaxPodsConstraint { - if x != nil { - return x.DefaultMaxPodsConstraint - } - return nil -} - -func (x *Cluster) GetResourceUsageExportConfig() *ResourceUsageExportConfig { - if x != nil { - return x.ResourceUsageExportConfig - } - return nil -} - -func (x *Cluster) GetAuthenticatorGroupsConfig() *AuthenticatorGroupsConfig { - if x != nil { - return x.AuthenticatorGroupsConfig - } - return nil -} - -func (x *Cluster) GetPrivateClusterConfig() *PrivateClusterConfig { - if x != nil { - return x.PrivateClusterConfig - } - return nil -} - -func (x *Cluster) GetDatabaseEncryption() *DatabaseEncryption { - if x != nil { - return x.DatabaseEncryption - } - return nil -} - -func (x *Cluster) GetVerticalPodAutoscaling() *VerticalPodAutoscaling { - if x != nil { - return x.VerticalPodAutoscaling - } - return nil -} - -func (x *Cluster) GetShieldedNodes() *ShieldedNodes { - if x != nil { - return x.ShieldedNodes - } - return nil -} - -func (x *Cluster) GetReleaseChannel() *ReleaseChannel { - if x != nil { - return 
x.ReleaseChannel - } - return nil -} - -func (x *Cluster) GetWorkloadIdentityConfig() *WorkloadIdentityConfig { - if x != nil { - return x.WorkloadIdentityConfig - } - return nil -} - -func (x *Cluster) GetSelfLink() string { - if x != nil { - return x.SelfLink - } - return "" -} - -// Deprecated: Do not use. -func (x *Cluster) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *Cluster) GetEndpoint() string { - if x != nil { - return x.Endpoint - } - return "" -} - -func (x *Cluster) GetInitialClusterVersion() string { - if x != nil { - return x.InitialClusterVersion - } - return "" -} - -func (x *Cluster) GetCurrentMasterVersion() string { - if x != nil { - return x.CurrentMasterVersion - } - return "" -} - -// Deprecated: Do not use. -func (x *Cluster) GetCurrentNodeVersion() string { - if x != nil { - return x.CurrentNodeVersion - } - return "" -} - -func (x *Cluster) GetCreateTime() string { - if x != nil { - return x.CreateTime - } - return "" -} - -func (x *Cluster) GetStatus() Cluster_Status { - if x != nil { - return x.Status - } - return Cluster_STATUS_UNSPECIFIED -} - -// Deprecated: Do not use. -func (x *Cluster) GetStatusMessage() string { - if x != nil { - return x.StatusMessage - } - return "" -} - -func (x *Cluster) GetNodeIpv4CidrSize() int32 { - if x != nil { - return x.NodeIpv4CidrSize - } - return 0 -} - -func (x *Cluster) GetServicesIpv4Cidr() string { - if x != nil { - return x.ServicesIpv4Cidr - } - return "" -} - -// Deprecated: Do not use. -func (x *Cluster) GetInstanceGroupUrls() []string { - if x != nil { - return x.InstanceGroupUrls - } - return nil -} - -// Deprecated: Do not use. -func (x *Cluster) GetCurrentNodeCount() int32 { - if x != nil { - return x.CurrentNodeCount - } - return 0 -} - -func (x *Cluster) GetExpireTime() string { - if x != nil { - return x.ExpireTime - } - return "" -} - -func (x *Cluster) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *Cluster) GetEnableTpu() bool { - if x != nil { - return x.EnableTpu - } - return false -} - -func (x *Cluster) GetTpuIpv4CidrBlock() string { - if x != nil { - return x.TpuIpv4CidrBlock - } - return "" -} - -func (x *Cluster) GetConditions() []*StatusCondition { - if x != nil { - return x.Conditions - } - return nil -} - -// ClusterUpdate describes an update to the cluster. Exactly one update can -// be applied to a cluster with each request, so at most one field can be -// provided. -type ClusterUpdate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The Kubernetes version to change the nodes to (typically an - // upgrade). - // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the Kubernetes master version - DesiredNodeVersion string `protobuf:"bytes,4,opt,name=desired_node_version,json=desiredNodeVersion,proto3" json:"desired_node_version,omitempty"` - // The monitoring service the cluster should use to write metrics. 
- // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions. - DesiredMonitoringService string `protobuf:"bytes,5,opt,name=desired_monitoring_service,json=desiredMonitoringService,proto3" json:"desired_monitoring_service,omitempty"` - // Configurations for the various addons available to run in the cluster. - DesiredAddonsConfig *AddonsConfig `protobuf:"bytes,6,opt,name=desired_addons_config,json=desiredAddonsConfig,proto3" json:"desired_addons_config,omitempty"` - // The node pool to be upgraded. This field is mandatory if - // "desired_node_version", "desired_image_family" or - // "desired_node_pool_autoscaling" is specified and there is more than one - // node pool on the cluster. - DesiredNodePoolId string `protobuf:"bytes,7,opt,name=desired_node_pool_id,json=desiredNodePoolId,proto3" json:"desired_node_pool_id,omitempty"` - // The desired image type for the node pool. - // NOTE: Set the "desired_node_pool" field as well. - DesiredImageType string `protobuf:"bytes,8,opt,name=desired_image_type,json=desiredImageType,proto3" json:"desired_image_type,omitempty"` - // Configuration of etcd encryption. - DesiredDatabaseEncryption *DatabaseEncryption `protobuf:"bytes,46,opt,name=desired_database_encryption,json=desiredDatabaseEncryption,proto3" json:"desired_database_encryption,omitempty"` - // Configuration for Workload Identity. - DesiredWorkloadIdentityConfig *WorkloadIdentityConfig `protobuf:"bytes,47,opt,name=desired_workload_identity_config,json=desiredWorkloadIdentityConfig,proto3" json:"desired_workload_identity_config,omitempty"` - // Configuration for Shielded Nodes. - DesiredShieldedNodes *ShieldedNodes `protobuf:"bytes,48,opt,name=desired_shielded_nodes,json=desiredShieldedNodes,proto3" json:"desired_shielded_nodes,omitempty"` - // Autoscaler configuration for the node pool specified in - // desired_node_pool_id. If there is only one pool in the - // cluster and desired_node_pool_id is not provided then - // the change applies to that single node pool. - DesiredNodePoolAutoscaling *NodePoolAutoscaling `protobuf:"bytes,9,opt,name=desired_node_pool_autoscaling,json=desiredNodePoolAutoscaling,proto3" json:"desired_node_pool_autoscaling,omitempty"` - // The desired list of Google Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster's nodes should be located. - // - // This list must always include the cluster's primary zone. - // - // Warning: changing cluster locations will update the locations of all node - // pools and will result in nodes being added and/or removed. - DesiredLocations []string `protobuf:"bytes,10,rep,name=desired_locations,json=desiredLocations,proto3" json:"desired_locations,omitempty"` - // The desired configuration options for master authorized networks feature. - DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,12,opt,name=desired_master_authorized_networks_config,json=desiredMasterAuthorizedNetworksConfig,proto3" json:"desired_master_authorized_networks_config,omitempty"` - // Cluster-level autoscaling configuration. 
- DesiredClusterAutoscaling *ClusterAutoscaling `protobuf:"bytes,15,opt,name=desired_cluster_autoscaling,json=desiredClusterAutoscaling,proto3" json:"desired_cluster_autoscaling,omitempty"` - // The desired configuration options for the Binary Authorization feature. - DesiredBinaryAuthorization *BinaryAuthorization `protobuf:"bytes,16,opt,name=desired_binary_authorization,json=desiredBinaryAuthorization,proto3" json:"desired_binary_authorization,omitempty"` - // The logging service the cluster should use to write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. - // - // If left as an empty string,`logging.googleapis.com/kubernetes` will be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. - DesiredLoggingService string `protobuf:"bytes,19,opt,name=desired_logging_service,json=desiredLoggingService,proto3" json:"desired_logging_service,omitempty"` - // The desired configuration for exporting resource usage. - DesiredResourceUsageExportConfig *ResourceUsageExportConfig `protobuf:"bytes,21,opt,name=desired_resource_usage_export_config,json=desiredResourceUsageExportConfig,proto3" json:"desired_resource_usage_export_config,omitempty"` - // Cluster-level Vertical Pod Autoscaling configuration. - DesiredVerticalPodAutoscaling *VerticalPodAutoscaling `protobuf:"bytes,22,opt,name=desired_vertical_pod_autoscaling,json=desiredVerticalPodAutoscaling,proto3" json:"desired_vertical_pod_autoscaling,omitempty"` - // The desired private cluster configuration. - DesiredPrivateClusterConfig *PrivateClusterConfig `protobuf:"bytes,25,opt,name=desired_private_cluster_config,json=desiredPrivateClusterConfig,proto3" json:"desired_private_cluster_config,omitempty"` - // The desired config of Intra-node visibility. - DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `protobuf:"bytes,26,opt,name=desired_intra_node_visibility_config,json=desiredIntraNodeVisibilityConfig,proto3" json:"desired_intra_node_visibility_config,omitempty"` - // The desired status of whether to disable default sNAT for this cluster. - DesiredDefaultSnatStatus *DefaultSnatStatus `protobuf:"bytes,28,opt,name=desired_default_snat_status,json=desiredDefaultSnatStatus,proto3" json:"desired_default_snat_status,omitempty"` - // The desired release channel configuration. - DesiredReleaseChannel *ReleaseChannel `protobuf:"bytes,31,opt,name=desired_release_channel,json=desiredReleaseChannel,proto3" json:"desired_release_channel,omitempty"` - // The Kubernetes version to change the master to. 
- // - // Users may specify either explicit versions offered by - // Kubernetes Engine or version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the default Kubernetes version - DesiredMasterVersion string `protobuf:"bytes,100,opt,name=desired_master_version,json=desiredMasterVersion,proto3" json:"desired_master_version,omitempty"` -} - -func (x *ClusterUpdate) Reset() { - *x = ClusterUpdate{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClusterUpdate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClusterUpdate) ProtoMessage() {} - -func (x *ClusterUpdate) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClusterUpdate.ProtoReflect.Descriptor instead. -func (*ClusterUpdate) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{24} -} - -func (x *ClusterUpdate) GetDesiredNodeVersion() string { - if x != nil { - return x.DesiredNodeVersion - } - return "" -} - -func (x *ClusterUpdate) GetDesiredMonitoringService() string { - if x != nil { - return x.DesiredMonitoringService - } - return "" -} - -func (x *ClusterUpdate) GetDesiredAddonsConfig() *AddonsConfig { - if x != nil { - return x.DesiredAddonsConfig - } - return nil -} - -func (x *ClusterUpdate) GetDesiredNodePoolId() string { - if x != nil { - return x.DesiredNodePoolId - } - return "" -} - -func (x *ClusterUpdate) GetDesiredImageType() string { - if x != nil { - return x.DesiredImageType - } - return "" -} - -func (x *ClusterUpdate) GetDesiredDatabaseEncryption() *DatabaseEncryption { - if x != nil { - return x.DesiredDatabaseEncryption - } - return nil -} - -func (x *ClusterUpdate) GetDesiredWorkloadIdentityConfig() *WorkloadIdentityConfig { - if x != nil { - return x.DesiredWorkloadIdentityConfig - } - return nil -} - -func (x *ClusterUpdate) GetDesiredShieldedNodes() *ShieldedNodes { - if x != nil { - return x.DesiredShieldedNodes - } - return nil -} - -func (x *ClusterUpdate) GetDesiredNodePoolAutoscaling() *NodePoolAutoscaling { - if x != nil { - return x.DesiredNodePoolAutoscaling - } - return nil -} - -func (x *ClusterUpdate) GetDesiredLocations() []string { - if x != nil { - return x.DesiredLocations - } - return nil -} - -func (x *ClusterUpdate) GetDesiredMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig { - if x != nil { - return x.DesiredMasterAuthorizedNetworksConfig - } - return nil -} - -func (x *ClusterUpdate) GetDesiredClusterAutoscaling() *ClusterAutoscaling { - if x != nil { - return x.DesiredClusterAutoscaling - } - return nil -} - -func (x *ClusterUpdate) GetDesiredBinaryAuthorization() *BinaryAuthorization { - if x != nil { - return x.DesiredBinaryAuthorization - } - return nil -} - -func (x *ClusterUpdate) GetDesiredLoggingService() string { - if x != nil { - return x.DesiredLoggingService - } - return 
"" -} - -func (x *ClusterUpdate) GetDesiredResourceUsageExportConfig() *ResourceUsageExportConfig { - if x != nil { - return x.DesiredResourceUsageExportConfig - } - return nil -} - -func (x *ClusterUpdate) GetDesiredVerticalPodAutoscaling() *VerticalPodAutoscaling { - if x != nil { - return x.DesiredVerticalPodAutoscaling - } - return nil -} - -func (x *ClusterUpdate) GetDesiredPrivateClusterConfig() *PrivateClusterConfig { - if x != nil { - return x.DesiredPrivateClusterConfig - } - return nil -} - -func (x *ClusterUpdate) GetDesiredIntraNodeVisibilityConfig() *IntraNodeVisibilityConfig { - if x != nil { - return x.DesiredIntraNodeVisibilityConfig - } - return nil -} - -func (x *ClusterUpdate) GetDesiredDefaultSnatStatus() *DefaultSnatStatus { - if x != nil { - return x.DesiredDefaultSnatStatus - } - return nil -} - -func (x *ClusterUpdate) GetDesiredReleaseChannel() *ReleaseChannel { - if x != nil { - return x.DesiredReleaseChannel - } - return nil -} - -func (x *ClusterUpdate) GetDesiredMasterVersion() string { - if x != nil { - return x.DesiredMasterVersion - } - return "" -} - -// This operation resource represents operations that may have happened or are -// happening on the cluster. All fields are output only. -type Operation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The server-assigned ID for the operation. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // operation is taking place. This field is deprecated, use location instead. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // The operation type. - OperationType Operation_Type `protobuf:"varint,3,opt,name=operation_type,json=operationType,proto3,enum=google.container.v1.Operation_Type" json:"operation_type,omitempty"` - // The current status of the operation. - Status Operation_Status `protobuf:"varint,4,opt,name=status,proto3,enum=google.container.v1.Operation_Status" json:"status,omitempty"` - // Detailed operation progress, if available. - Detail string `protobuf:"bytes,8,opt,name=detail,proto3" json:"detail,omitempty"` - // Output only. If an error has occurred, a textual description of the error. - StatusMessage string `protobuf:"bytes,5,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // Server-defined URL for the resource. - SelfLink string `protobuf:"bytes,6,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"` - // Server-defined URL for the target of the operation. - TargetLink string `protobuf:"bytes,7,opt,name=target_link,json=targetLink,proto3" json:"target_link,omitempty"` - // [Output only] The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) - // or - // [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) - // in which the cluster resides. - Location string `protobuf:"bytes,9,opt,name=location,proto3" json:"location,omitempty"` - // [Output only] The time the operation started, in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. 
- StartTime string `protobuf:"bytes,10,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // [Output only] The time the operation completed, in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. - EndTime string `protobuf:"bytes,11,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // Output only. [Output only] Progress information for an operation. - Progress *OperationProgress `protobuf:"bytes,12,opt,name=progress,proto3" json:"progress,omitempty"` - // Which conditions caused the current cluster state. - ClusterConditions []*StatusCondition `protobuf:"bytes,13,rep,name=cluster_conditions,json=clusterConditions,proto3" json:"cluster_conditions,omitempty"` - // Which conditions caused the current node pool state. - NodepoolConditions []*StatusCondition `protobuf:"bytes,14,rep,name=nodepool_conditions,json=nodepoolConditions,proto3" json:"nodepool_conditions,omitempty"` -} - -func (x *Operation) Reset() { - *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Operation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Operation) ProtoMessage() {} - -func (x *Operation) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Operation.ProtoReflect.Descriptor instead. -func (*Operation) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{25} -} - -func (x *Operation) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Deprecated: Do not use. -func (x *Operation) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *Operation) GetOperationType() Operation_Type { - if x != nil { - return x.OperationType - } - return Operation_TYPE_UNSPECIFIED -} - -func (x *Operation) GetStatus() Operation_Status { - if x != nil { - return x.Status - } - return Operation_STATUS_UNSPECIFIED -} - -func (x *Operation) GetDetail() string { - if x != nil { - return x.Detail - } - return "" -} - -func (x *Operation) GetStatusMessage() string { - if x != nil { - return x.StatusMessage - } - return "" -} - -func (x *Operation) GetSelfLink() string { - if x != nil { - return x.SelfLink - } - return "" -} - -func (x *Operation) GetTargetLink() string { - if x != nil { - return x.TargetLink - } - return "" -} - -func (x *Operation) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *Operation) GetStartTime() string { - if x != nil { - return x.StartTime - } - return "" -} - -func (x *Operation) GetEndTime() string { - if x != nil { - return x.EndTime - } - return "" -} - -func (x *Operation) GetProgress() *OperationProgress { - if x != nil { - return x.Progress - } - return nil -} - -func (x *Operation) GetClusterConditions() []*StatusCondition { - if x != nil { - return x.ClusterConditions - } - return nil -} - -func (x *Operation) GetNodepoolConditions() []*StatusCondition { - if x != nil { - return x.NodepoolConditions - } - return nil -} - -// Information about operation (or operation stage) progress. 
-type OperationProgress struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A non-parameterized string describing an operation stage. - // Unset for single-stage operations. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Status of an operation stage. - // Unset for single-stage operations. - Status Operation_Status `protobuf:"varint,2,opt,name=status,proto3,enum=google.container.v1.Operation_Status" json:"status,omitempty"` - // Progress metric bundle, for example: - // metrics: [{name: "nodes done", int_value: 15}, - // {name: "nodes total", int_value: 32}] - // or - // metrics: [{name: "progress", double_value: 0.56}, - // {name: "progress scale", double_value: 1.0}] - Metrics []*OperationProgress_Metric `protobuf:"bytes,3,rep,name=metrics,proto3" json:"metrics,omitempty"` - // Substages of an operation or a stage. - Stages []*OperationProgress `protobuf:"bytes,4,rep,name=stages,proto3" json:"stages,omitempty"` -} - -func (x *OperationProgress) Reset() { - *x = OperationProgress{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OperationProgress) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OperationProgress) ProtoMessage() {} - -func (x *OperationProgress) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OperationProgress.ProtoReflect.Descriptor instead. -func (*OperationProgress) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{26} -} - -func (x *OperationProgress) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *OperationProgress) GetStatus() Operation_Status { - if x != nil { - return x.Status - } - return Operation_STATUS_UNSPECIFIED -} - -func (x *OperationProgress) GetMetrics() []*OperationProgress_Metric { - if x != nil { - return x.Metrics - } - return nil -} - -func (x *OperationProgress) GetStages() []*OperationProgress { - if x != nil { - return x.Stages - } - return nil -} - -// CreateClusterRequest creates a cluster. -type CreateClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the parent - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Required. 
A [cluster - // resource](https://cloud.google.com/container-engine/reference/rest/v1/projects.locations.clusters) - Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` - // The parent (project and location) where the cluster will be created. - // Specified in the format `projects/*/locations/*`. - Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *CreateClusterRequest) Reset() { - *x = CreateClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateClusterRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateClusterRequest) ProtoMessage() {} - -func (x *CreateClusterRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateClusterRequest.ProtoReflect.Descriptor instead. -func (*CreateClusterRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{27} -} - -// Deprecated: Do not use. -func (x *CreateClusterRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *CreateClusterRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *CreateClusterRequest) GetCluster() *Cluster { - if x != nil { - return x.Cluster - } - return nil -} - -func (x *CreateClusterRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// GetClusterRequest gets the settings of a cluster. -type GetClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to retrieve. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // The name (project, location, cluster) of the cluster to retrieve. - // Specified in the format `projects/*/locations/*/clusters/*`. 
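For orientation, a `CreateClusterRequest` like the one defined above is normally sent through the generated `ClusterManagerClient`. The sketch below assumes the `cloud.google.com/go/container/apiv1` GAPIC client and Application Default Credentials; the project, location, and cluster names are placeholders, and `initial_node_count` is used only because it is the field shown in this file (newer configs set `node_pool.initial_node_count` instead):

```go
package main

import (
	"context"
	"log"

	container "cloud.google.com/go/container/apiv1"
	containerpb "google.golang.org/genproto/googleapis/container/v1"
)

func main() {
	ctx := context.Background()

	// Client wrapping the ClusterManager service; credentials come from the
	// environment (Application Default Credentials).
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		log.Fatalf("NewClusterManagerClient: %v", err)
	}
	defer c.Close()

	// Parent uses the `projects/*/locations/*` form described above.
	req := &containerpb.CreateClusterRequest{
		Parent: "projects/my-project/locations/us-central1",
		Cluster: &containerpb.Cluster{
			Name:             "demo-cluster",
			InitialNodeCount: 3, // deprecated; shown here only for brevity
		},
	}

	op, err := c.CreateCluster(ctx, req)
	if err != nil {
		log.Fatalf("CreateCluster: %v", err)
	}
	log.Printf("operation %s started, status: %v", op.GetName(), op.GetStatus())
}
```

The returned `Operation` carries the status, timing, and condition fields defined earlier in this file.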
- Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetClusterRequest) Reset() { - *x = GetClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetClusterRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetClusterRequest) ProtoMessage() {} - -func (x *GetClusterRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetClusterRequest.ProtoReflect.Descriptor instead. -func (*GetClusterRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{28} -} - -// Deprecated: Do not use. -func (x *GetClusterRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *GetClusterRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *GetClusterRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *GetClusterRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// UpdateClusterRequest updates the settings of a cluster. -type UpdateClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. A description of the update. - Update *ClusterUpdate `protobuf:"bytes,4,opt,name=update,proto3" json:"update,omitempty"` - // The name (project, location, cluster) of the cluster to update. - // Specified in the format `projects/*/locations/*/clusters/*`. 
- Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *UpdateClusterRequest) Reset() { - *x = UpdateClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateClusterRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateClusterRequest) ProtoMessage() {} - -func (x *UpdateClusterRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateClusterRequest.ProtoReflect.Descriptor instead. -func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{29} -} - -// Deprecated: Do not use. -func (x *UpdateClusterRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *UpdateClusterRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *UpdateClusterRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *UpdateClusterRequest) GetUpdate() *ClusterUpdate { - if x != nil { - return x.Update - } - return nil -} - -func (x *UpdateClusterRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// UpdateNodePoolRequests update a node pool's image and/or version. -type UpdateNodePoolRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // Required. The Kubernetes version to change the nodes to (typically an - // upgrade). 
- // - // Users may specify either explicit versions offered by Kubernetes Engine or - // version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the Kubernetes master version - NodeVersion string `protobuf:"bytes,5,opt,name=node_version,json=nodeVersion,proto3" json:"node_version,omitempty"` - // Required. The desired image type for the node pool. - ImageType string `protobuf:"bytes,6,opt,name=image_type,json=imageType,proto3" json:"image_type,omitempty"` - // The name (project, location, cluster, node pool) of the node pool to - // update. Specified in the format - // `projects/*/locations/*/clusters/*/nodePools/*`. - Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"` - // The desired list of Google Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) in which the - // node pool's nodes should be located. Changing the locations for a node pool - // will result in nodes being either created or removed from the node pool, - // depending on whether locations are being added or removed. - Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"` - // The desired workload metadata config for the node pool. - WorkloadMetadataConfig *WorkloadMetadataConfig `protobuf:"bytes,14,opt,name=workload_metadata_config,json=workloadMetadataConfig,proto3" json:"workload_metadata_config,omitempty"` - // Upgrade settings control disruption and speed of the upgrade. - UpgradeSettings *NodePool_UpgradeSettings `protobuf:"bytes,15,opt,name=upgrade_settings,json=upgradeSettings,proto3" json:"upgrade_settings,omitempty"` -} - -func (x *UpdateNodePoolRequest) Reset() { - *x = UpdateNodePoolRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateNodePoolRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateNodePoolRequest) ProtoMessage() {} - -func (x *UpdateNodePoolRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateNodePoolRequest.ProtoReflect.Descriptor instead. -func (*UpdateNodePoolRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{30} -} - -// Deprecated: Do not use. -func (x *UpdateNodePoolRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *UpdateNodePoolRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *UpdateNodePoolRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. 
-func (x *UpdateNodePoolRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *UpdateNodePoolRequest) GetNodeVersion() string { - if x != nil { - return x.NodeVersion - } - return "" -} - -func (x *UpdateNodePoolRequest) GetImageType() string { - if x != nil { - return x.ImageType - } - return "" -} - -func (x *UpdateNodePoolRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UpdateNodePoolRequest) GetLocations() []string { - if x != nil { - return x.Locations - } - return nil -} - -func (x *UpdateNodePoolRequest) GetWorkloadMetadataConfig() *WorkloadMetadataConfig { - if x != nil { - return x.WorkloadMetadataConfig - } - return nil -} - -func (x *UpdateNodePoolRequest) GetUpgradeSettings() *NodePool_UpgradeSettings { - if x != nil { - return x.UpgradeSettings - } - return nil -} - -// SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool. -type SetNodePoolAutoscalingRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // Required. Autoscaling configuration for the node pool. - Autoscaling *NodePoolAutoscaling `protobuf:"bytes,5,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"` - // The name (project, location, cluster, node pool) of the node pool to set - // autoscaler settings. Specified in the format - // `projects/*/locations/*/clusters/*/nodePools/*`. 
- Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetNodePoolAutoscalingRequest) Reset() { - *x = SetNodePoolAutoscalingRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetNodePoolAutoscalingRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetNodePoolAutoscalingRequest) ProtoMessage() {} - -func (x *SetNodePoolAutoscalingRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetNodePoolAutoscalingRequest.ProtoReflect.Descriptor instead. -func (*SetNodePoolAutoscalingRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{31} -} - -// Deprecated: Do not use. -func (x *SetNodePoolAutoscalingRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolAutoscalingRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolAutoscalingRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolAutoscalingRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *SetNodePoolAutoscalingRequest) GetAutoscaling() *NodePoolAutoscaling { - if x != nil { - return x.Autoscaling - } - return nil -} - -func (x *SetNodePoolAutoscalingRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetLoggingServiceRequest sets the logging service of a cluster. -type SetLoggingServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The logging service the cluster should use to write logs. - // Currently available options: - // - // * `logging.googleapis.com/kubernetes` - The Cloud Logging - // service with a Kubernetes-native resource model - // * `logging.googleapis.com` - The legacy Cloud Logging service (no longer - // available as of GKE 1.15). - // * `none` - no logs will be exported from the cluster. 
- // - // If left as an empty string,`logging.googleapis.com/kubernetes` will be - // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions. - LoggingService string `protobuf:"bytes,4,opt,name=logging_service,json=loggingService,proto3" json:"logging_service,omitempty"` - // The name (project, location, cluster) of the cluster to set logging. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetLoggingServiceRequest) Reset() { - *x = SetLoggingServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetLoggingServiceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetLoggingServiceRequest) ProtoMessage() {} - -func (x *SetLoggingServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetLoggingServiceRequest.ProtoReflect.Descriptor instead. -func (*SetLoggingServiceRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{32} -} - -// Deprecated: Do not use. -func (x *SetLoggingServiceRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLoggingServiceRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLoggingServiceRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetLoggingServiceRequest) GetLoggingService() string { - if x != nil { - return x.LoggingService - } - return "" -} - -func (x *SetLoggingServiceRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetMonitoringServiceRequest sets the monitoring service of a cluster. -type SetMonitoringServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The monitoring service the cluster should use to write metrics. 
- // Currently available options: - // - // * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring - // service with a Kubernetes-native resource model - // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no - // longer available as of GKE 1.15). - // * `none` - No metrics will be exported from the cluster. - // - // If left as an empty string,`monitoring.googleapis.com/kubernetes` will be - // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions. - MonitoringService string `protobuf:"bytes,4,opt,name=monitoring_service,json=monitoringService,proto3" json:"monitoring_service,omitempty"` - // The name (project, location, cluster) of the cluster to set monitoring. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetMonitoringServiceRequest) Reset() { - *x = SetMonitoringServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetMonitoringServiceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetMonitoringServiceRequest) ProtoMessage() {} - -func (x *SetMonitoringServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetMonitoringServiceRequest.ProtoReflect.Descriptor instead. -func (*SetMonitoringServiceRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{33} -} - -// Deprecated: Do not use. -func (x *SetMonitoringServiceRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetMonitoringServiceRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetMonitoringServiceRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetMonitoringServiceRequest) GetMonitoringService() string { - if x != nil { - return x.MonitoringService - } - return "" -} - -func (x *SetMonitoringServiceRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetAddonsConfigRequest sets the addons associated with the cluster. -type SetAddonsConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. 
- // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The desired configurations for the various addons available to run in the - // cluster. - AddonsConfig *AddonsConfig `protobuf:"bytes,4,opt,name=addons_config,json=addonsConfig,proto3" json:"addons_config,omitempty"` - // The name (project, location, cluster) of the cluster to set addons. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetAddonsConfigRequest) Reset() { - *x = SetAddonsConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetAddonsConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetAddonsConfigRequest) ProtoMessage() {} - -func (x *SetAddonsConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetAddonsConfigRequest.ProtoReflect.Descriptor instead. -func (*SetAddonsConfigRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{34} -} - -// Deprecated: Do not use. -func (x *SetAddonsConfigRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetAddonsConfigRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetAddonsConfigRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetAddonsConfigRequest) GetAddonsConfig() *AddonsConfig { - if x != nil { - return x.AddonsConfig - } - return nil -} - -func (x *SetAddonsConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetLocationsRequest sets the locations of the cluster. -type SetLocationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. 
The desired list of Google Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster's nodes should be located. Changing the locations a cluster is in - // will result in nodes being either created or removed from the cluster, - // depending on whether locations are being added or removed. - // - // This list must always include the cluster's primary zone. - Locations []string `protobuf:"bytes,4,rep,name=locations,proto3" json:"locations,omitempty"` - // The name (project, location, cluster) of the cluster to set locations. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetLocationsRequest) Reset() { - *x = SetLocationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetLocationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetLocationsRequest) ProtoMessage() {} - -func (x *SetLocationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetLocationsRequest.ProtoReflect.Descriptor instead. -func (*SetLocationsRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{35} -} - -// Deprecated: Do not use. -func (x *SetLocationsRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLocationsRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLocationsRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetLocationsRequest) GetLocations() []string { - if x != nil { - return x.Locations - } - return nil -} - -func (x *SetLocationsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// UpdateMasterRequest updates the master of the cluster. -type UpdateMasterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The Kubernetes version to change the master to. 
- // - // Users may specify either explicit versions offered by Kubernetes Engine or - // version aliases, which have the following behavior: - // - // - "latest": picks the highest valid Kubernetes version - // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - // - "1.X.Y-gke.N": picks an explicit Kubernetes version - // - "-": picks the default Kubernetes version - MasterVersion string `protobuf:"bytes,4,opt,name=master_version,json=masterVersion,proto3" json:"master_version,omitempty"` - // The name (project, location, cluster) of the cluster to update. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *UpdateMasterRequest) Reset() { - *x = UpdateMasterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateMasterRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateMasterRequest) ProtoMessage() {} - -func (x *UpdateMasterRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateMasterRequest.ProtoReflect.Descriptor instead. -func (*UpdateMasterRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{36} -} - -// Deprecated: Do not use. -func (x *UpdateMasterRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *UpdateMasterRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *UpdateMasterRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *UpdateMasterRequest) GetMasterVersion() string { - if x != nil { - return x.MasterVersion - } - return "" -} - -func (x *UpdateMasterRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetMasterAuthRequest updates the admin password of a cluster. -type SetMasterAuthRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to upgrade. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. 
- ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The exact form of action to be taken on the master auth. - Action SetMasterAuthRequest_Action `protobuf:"varint,4,opt,name=action,proto3,enum=google.container.v1.SetMasterAuthRequest_Action" json:"action,omitempty"` - // Required. A description of the update. - Update *MasterAuth `protobuf:"bytes,5,opt,name=update,proto3" json:"update,omitempty"` - // The name (project, location, cluster) of the cluster to set auth. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetMasterAuthRequest) Reset() { - *x = SetMasterAuthRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetMasterAuthRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetMasterAuthRequest) ProtoMessage() {} - -func (x *SetMasterAuthRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetMasterAuthRequest.ProtoReflect.Descriptor instead. -func (*SetMasterAuthRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{37} -} - -// Deprecated: Do not use. -func (x *SetMasterAuthRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetMasterAuthRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetMasterAuthRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetMasterAuthRequest) GetAction() SetMasterAuthRequest_Action { - if x != nil { - return x.Action - } - return SetMasterAuthRequest_UNKNOWN -} - -func (x *SetMasterAuthRequest) GetUpdate() *MasterAuth { - if x != nil { - return x.Update - } - return nil -} - -func (x *SetMasterAuthRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// DeleteClusterRequest deletes a cluster. -type DeleteClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to delete. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. 
- ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // The name (project, location, cluster) of the cluster to delete. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteClusterRequest) Reset() { - *x = DeleteClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteClusterRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteClusterRequest) ProtoMessage() {} - -func (x *DeleteClusterRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteClusterRequest.ProtoReflect.Descriptor instead. -func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{38} -} - -// Deprecated: Do not use. -func (x *DeleteClusterRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *DeleteClusterRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *DeleteClusterRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *DeleteClusterRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// ListClustersRequest lists clusters. -type ListClustersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides, or "-" for all zones. This field has been deprecated and - // replaced by the parent field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // The parent (project and location) where the clusters will be listed. - // Specified in the format `projects/*/locations/*`. - // Location "-" matches all zones and all regions. 
- Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *ListClustersRequest) Reset() { - *x = ListClustersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListClustersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListClustersRequest) ProtoMessage() {} - -func (x *ListClustersRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListClustersRequest.ProtoReflect.Descriptor instead. -func (*ListClustersRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{39} -} - -// Deprecated: Do not use. -func (x *ListClustersRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *ListClustersRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *ListClustersRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// ListClustersResponse is the result of ListClustersRequest. -type ListClustersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of clusters in the project in the specified zone, or - // across all ones. - Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` - // If any zones are listed here, the list of clusters returned - // may be missing those zones. - MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones,proto3" json:"missing_zones,omitempty"` -} - -func (x *ListClustersResponse) Reset() { - *x = ListClustersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListClustersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListClustersResponse) ProtoMessage() {} - -func (x *ListClustersResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListClustersResponse.ProtoReflect.Descriptor instead. -func (*ListClustersResponse) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{40} -} - -func (x *ListClustersResponse) GetClusters() []*Cluster { - if x != nil { - return x.Clusters - } - return nil -} - -func (x *ListClustersResponse) GetMissingZones() []string { - if x != nil { - return x.MissingZones - } - return nil -} - -// GetOperationRequest gets a single operation. -type GetOperationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. 
The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The server-assigned `name` of the operation. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - // The name (project, location, operation id) of the operation to get. - // Specified in the format `projects/*/locations/*/operations/*`. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetOperationRequest) Reset() { - *x = GetOperationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOperationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOperationRequest) ProtoMessage() {} - -func (x *GetOperationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOperationRequest.ProtoReflect.Descriptor instead. -func (*GetOperationRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{41} -} - -// Deprecated: Do not use. -func (x *GetOperationRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *GetOperationRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *GetOperationRequest) GetOperationId() string { - if x != nil { - return x.OperationId - } - return "" -} - -func (x *GetOperationRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// ListOperationsRequest lists operations. -type ListOperationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) to return - // operations for, or `-` for all zones. This field has been deprecated and - // replaced by the parent field. - // - // Deprecated: Do not use. 
- Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // The parent (project and location) where the operations will be listed. - // Specified in the format `projects/*/locations/*`. - // Location "-" matches all zones and all regions. - Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *ListOperationsRequest) Reset() { - *x = ListOperationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListOperationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListOperationsRequest) ProtoMessage() {} - -func (x *ListOperationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListOperationsRequest.ProtoReflect.Descriptor instead. -func (*ListOperationsRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{42} -} - -// Deprecated: Do not use. -func (x *ListOperationsRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *ListOperationsRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *ListOperationsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// CancelOperationRequest cancels a single operation. -type CancelOperationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // operation resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The server-assigned `name` of the operation. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - // The name (project, location, operation id) of the operation to cancel. - // Specified in the format `projects/*/locations/*/operations/*`. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *CancelOperationRequest) Reset() { - *x = CancelOperationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelOperationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelOperationRequest) ProtoMessage() {} - -func (x *CancelOperationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelOperationRequest.ProtoReflect.Descriptor instead. -func (*CancelOperationRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{43} -} - -// Deprecated: Do not use. -func (x *CancelOperationRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *CancelOperationRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *CancelOperationRequest) GetOperationId() string { - if x != nil { - return x.OperationId - } - return "" -} - -func (x *CancelOperationRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// ListOperationsResponse is the result of ListOperationsRequest. -type ListOperationsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of operations in the project in the specified zone. - Operations []*Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` - // If any zones are listed here, the list of operations returned - // may be missing the operations from those zones. - MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones,proto3" json:"missing_zones,omitempty"` -} - -func (x *ListOperationsResponse) Reset() { - *x = ListOperationsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListOperationsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListOperationsResponse) ProtoMessage() {} - -func (x *ListOperationsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListOperationsResponse.ProtoReflect.Descriptor instead. -func (*ListOperationsResponse) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{44} -} - -func (x *ListOperationsResponse) GetOperations() []*Operation { - if x != nil { - return x.Operations - } - return nil -} - -func (x *ListOperationsResponse) GetMissingZones() []string { - if x != nil { - return x.MissingZones - } - return nil -} - -// Gets the current Kubernetes Engine service configuration. 
-type GetServerConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) to return - // operations for. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // The name (project and location) of the server config to get, - // specified in the format `projects/*/locations/*`. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetServerConfigRequest) Reset() { - *x = GetServerConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServerConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServerConfigRequest) ProtoMessage() {} - -func (x *GetServerConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServerConfigRequest.ProtoReflect.Descriptor instead. -func (*GetServerConfigRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{45} -} - -// Deprecated: Do not use. -func (x *GetServerConfigRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *GetServerConfigRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *GetServerConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Kubernetes Engine service configuration. -type ServerConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Version of Kubernetes the service deploys by default. - DefaultClusterVersion string `protobuf:"bytes,1,opt,name=default_cluster_version,json=defaultClusterVersion,proto3" json:"default_cluster_version,omitempty"` - // List of valid node upgrade target versions, in descending order. - ValidNodeVersions []string `protobuf:"bytes,3,rep,name=valid_node_versions,json=validNodeVersions,proto3" json:"valid_node_versions,omitempty"` - // Default image type. - DefaultImageType string `protobuf:"bytes,4,opt,name=default_image_type,json=defaultImageType,proto3" json:"default_image_type,omitempty"` - // List of valid image types. - ValidImageTypes []string `protobuf:"bytes,5,rep,name=valid_image_types,json=validImageTypes,proto3" json:"valid_image_types,omitempty"` - // List of valid master versions, in descending order. 
- ValidMasterVersions []string `protobuf:"bytes,6,rep,name=valid_master_versions,json=validMasterVersions,proto3" json:"valid_master_versions,omitempty"` - // List of release channel configurations. - Channels []*ServerConfig_ReleaseChannelConfig `protobuf:"bytes,9,rep,name=channels,proto3" json:"channels,omitempty"` -} - -func (x *ServerConfig) Reset() { - *x = ServerConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerConfig) ProtoMessage() {} - -func (x *ServerConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerConfig.ProtoReflect.Descriptor instead. -func (*ServerConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{46} -} - -func (x *ServerConfig) GetDefaultClusterVersion() string { - if x != nil { - return x.DefaultClusterVersion - } - return "" -} - -func (x *ServerConfig) GetValidNodeVersions() []string { - if x != nil { - return x.ValidNodeVersions - } - return nil -} - -func (x *ServerConfig) GetDefaultImageType() string { - if x != nil { - return x.DefaultImageType - } - return "" -} - -func (x *ServerConfig) GetValidImageTypes() []string { - if x != nil { - return x.ValidImageTypes - } - return nil -} - -func (x *ServerConfig) GetValidMasterVersions() []string { - if x != nil { - return x.ValidMasterVersions - } - return nil -} - -func (x *ServerConfig) GetChannels() []*ServerConfig_ReleaseChannelConfig { - if x != nil { - return x.Channels - } - return nil -} - -// CreateNodePoolRequest creates a node pool for a cluster. -type CreateNodePoolRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the parent - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The node pool to create. - NodePool *NodePool `protobuf:"bytes,4,opt,name=node_pool,json=nodePool,proto3" json:"node_pool,omitempty"` - // The parent (project, location, cluster id) where the node pool will be - // created. Specified in the format - // `projects/*/locations/*/clusters/*`. 
- Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *CreateNodePoolRequest) Reset() { - *x = CreateNodePoolRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNodePoolRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNodePoolRequest) ProtoMessage() {} - -func (x *CreateNodePoolRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNodePoolRequest.ProtoReflect.Descriptor instead. -func (*CreateNodePoolRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{47} -} - -// Deprecated: Do not use. -func (x *CreateNodePoolRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *CreateNodePoolRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *CreateNodePoolRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *CreateNodePoolRequest) GetNodePool() *NodePool { - if x != nil { - return x.NodePool - } - return nil -} - -func (x *CreateNodePoolRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// DeleteNodePoolRequest deletes a node pool for a cluster. -type DeleteNodePoolRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool to delete. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // The name (project, location, cluster, node pool id) of the node pool to - // delete. Specified in the format - // `projects/*/locations/*/clusters/*/nodePools/*`. 
- Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteNodePoolRequest) Reset() { - *x = DeleteNodePoolRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNodePoolRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNodePoolRequest) ProtoMessage() {} - -func (x *DeleteNodePoolRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNodePoolRequest.ProtoReflect.Descriptor instead. -func (*DeleteNodePoolRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{48} -} - -// Deprecated: Do not use. -func (x *DeleteNodePoolRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *DeleteNodePoolRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *DeleteNodePoolRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. -func (x *DeleteNodePoolRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *DeleteNodePoolRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// ListNodePoolsRequest lists the node pool(s) for a cluster. -type ListNodePoolsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the parent - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the parent field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // The parent (project, location, cluster id) where the node pools will be - // listed. Specified in the format `projects/*/locations/*/clusters/*`. 
- Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *ListNodePoolsRequest) Reset() { - *x = ListNodePoolsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNodePoolsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNodePoolsRequest) ProtoMessage() {} - -func (x *ListNodePoolsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNodePoolsRequest.ProtoReflect.Descriptor instead. -func (*ListNodePoolsRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{49} -} - -// Deprecated: Do not use. -func (x *ListNodePoolsRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *ListNodePoolsRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *ListNodePoolsRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *ListNodePoolsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// GetNodePoolRequest retrieves a node pool for a cluster. -type GetNodePoolRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // The name (project, location, cluster, node pool id) of the node pool to - // get. Specified in the format - // `projects/*/locations/*/clusters/*/nodePools/*`. 
- Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNodePoolRequest) Reset() { - *x = GetNodePoolRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNodePoolRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNodePoolRequest) ProtoMessage() {} - -func (x *GetNodePoolRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNodePoolRequest.ProtoReflect.Descriptor instead. -func (*GetNodePoolRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{50} -} - -// Deprecated: Do not use. -func (x *GetNodePoolRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *GetNodePoolRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *GetNodePoolRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. -func (x *GetNodePoolRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *GetNodePoolRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// NodePool contains the name and configuration for a cluster's node pool. -// Node pools are a set of nodes (i.e. VM's), with a common configuration and -// specification, under the control of the cluster master. They may have a set -// of Kubernetes labels applied to them, which may be used to reference them -// during pod scheduling. They may also be resized up or down, to accommodate -// the workload. -type NodePool struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the node pool. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The node configuration of the pool. - Config *NodeConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - // The initial node count for the pool. You must ensure that your - // Compute Engine [resource quota](https://cloud.google.com/compute/quotas) - // is sufficient for this number of instances. You must also have available - // firewall and routes quota. - InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount,proto3" json:"initial_node_count,omitempty"` - // The list of Google Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) in which the - // NodePool's nodes should be located. - // - // If this value is unspecified during node pool creation, the - // [Cluster.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster.FIELDS.locations) - // value will be used, instead. - // - // Warning: changing node pool locations will result in nodes being added - // and/or removed. - Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"` - // [Output only] Server-defined URL for the resource. 
- SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"` - // The version of the Kubernetes of this node. - Version string `protobuf:"bytes,101,opt,name=version,proto3" json:"version,omitempty"` - // [Output only] The resource URLs of the [managed instance - // groups](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) - // associated with this node pool. - InstanceGroupUrls []string `protobuf:"bytes,102,rep,name=instance_group_urls,json=instanceGroupUrls,proto3" json:"instance_group_urls,omitempty"` - // [Output only] The status of the nodes in this pool instance. - Status NodePool_Status `protobuf:"varint,103,opt,name=status,proto3,enum=google.container.v1.NodePool_Status" json:"status,omitempty"` - // [Output only] Deprecated. Use conditions instead. - // Additional information about the current status of this - // node pool instance, if available. - // - // Deprecated: Do not use. - StatusMessage string `protobuf:"bytes,104,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // Autoscaler configuration for this NodePool. Autoscaler is enabled - // only if a valid configuration is present. - Autoscaling *NodePoolAutoscaling `protobuf:"bytes,4,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"` - // NodeManagement configuration for this NodePool. - Management *NodeManagement `protobuf:"bytes,5,opt,name=management,proto3" json:"management,omitempty"` - // The constraint on the maximum number of pods that can be run - // simultaneously on a node in the node pool. - MaxPodsConstraint *MaxPodsConstraint `protobuf:"bytes,6,opt,name=max_pods_constraint,json=maxPodsConstraint,proto3" json:"max_pods_constraint,omitempty"` - // Which conditions caused the current node pool state. - Conditions []*StatusCondition `protobuf:"bytes,105,rep,name=conditions,proto3" json:"conditions,omitempty"` - // [Output only] The pod CIDR block size per node in this node pool. - PodIpv4CidrSize int32 `protobuf:"varint,7,opt,name=pod_ipv4_cidr_size,json=podIpv4CidrSize,proto3" json:"pod_ipv4_cidr_size,omitempty"` - // Upgrade settings control disruption and speed of the upgrade. - UpgradeSettings *NodePool_UpgradeSettings `protobuf:"bytes,107,opt,name=upgrade_settings,json=upgradeSettings,proto3" json:"upgrade_settings,omitempty"` -} - -func (x *NodePool) Reset() { - *x = NodePool{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodePool) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodePool) ProtoMessage() {} - -func (x *NodePool) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodePool.ProtoReflect.Descriptor instead. 
-func (*NodePool) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{51} -} - -func (x *NodePool) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NodePool) GetConfig() *NodeConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *NodePool) GetInitialNodeCount() int32 { - if x != nil { - return x.InitialNodeCount - } - return 0 -} - -func (x *NodePool) GetLocations() []string { - if x != nil { - return x.Locations - } - return nil -} - -func (x *NodePool) GetSelfLink() string { - if x != nil { - return x.SelfLink - } - return "" -} - -func (x *NodePool) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *NodePool) GetInstanceGroupUrls() []string { - if x != nil { - return x.InstanceGroupUrls - } - return nil -} - -func (x *NodePool) GetStatus() NodePool_Status { - if x != nil { - return x.Status - } - return NodePool_STATUS_UNSPECIFIED -} - -// Deprecated: Do not use. -func (x *NodePool) GetStatusMessage() string { - if x != nil { - return x.StatusMessage - } - return "" -} - -func (x *NodePool) GetAutoscaling() *NodePoolAutoscaling { - if x != nil { - return x.Autoscaling - } - return nil -} - -func (x *NodePool) GetManagement() *NodeManagement { - if x != nil { - return x.Management - } - return nil -} - -func (x *NodePool) GetMaxPodsConstraint() *MaxPodsConstraint { - if x != nil { - return x.MaxPodsConstraint - } - return nil -} - -func (x *NodePool) GetConditions() []*StatusCondition { - if x != nil { - return x.Conditions - } - return nil -} - -func (x *NodePool) GetPodIpv4CidrSize() int32 { - if x != nil { - return x.PodIpv4CidrSize - } - return 0 -} - -func (x *NodePool) GetUpgradeSettings() *NodePool_UpgradeSettings { - if x != nil { - return x.UpgradeSettings - } - return nil -} - -// NodeManagement defines the set of node management services turned on for the -// node pool. -type NodeManagement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A flag that specifies whether node auto-upgrade is enabled for the node - // pool. If enabled, node auto-upgrade helps keep the nodes in your node pool - // up to date with the latest release version of Kubernetes. - AutoUpgrade bool `protobuf:"varint,1,opt,name=auto_upgrade,json=autoUpgrade,proto3" json:"auto_upgrade,omitempty"` - // A flag that specifies whether the node auto-repair is enabled for the node - // pool. If enabled, the nodes in this node pool will be monitored and, if - // they fail health checks too many times, an automatic repair action will be - // triggered. - AutoRepair bool `protobuf:"varint,2,opt,name=auto_repair,json=autoRepair,proto3" json:"auto_repair,omitempty"` - // Specifies the Auto Upgrade knobs for the node pool. 
- UpgradeOptions *AutoUpgradeOptions `protobuf:"bytes,10,opt,name=upgrade_options,json=upgradeOptions,proto3" json:"upgrade_options,omitempty"` -} - -func (x *NodeManagement) Reset() { - *x = NodeManagement{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeManagement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeManagement) ProtoMessage() {} - -func (x *NodeManagement) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeManagement.ProtoReflect.Descriptor instead. -func (*NodeManagement) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{52} -} - -func (x *NodeManagement) GetAutoUpgrade() bool { - if x != nil { - return x.AutoUpgrade - } - return false -} - -func (x *NodeManagement) GetAutoRepair() bool { - if x != nil { - return x.AutoRepair - } - return false -} - -func (x *NodeManagement) GetUpgradeOptions() *AutoUpgradeOptions { - if x != nil { - return x.UpgradeOptions - } - return nil -} - -// AutoUpgradeOptions defines the set of options for the user to control how -// the Auto Upgrades will proceed. -type AutoUpgradeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // [Output only] This field is set when upgrades are about to commence - // with the approximate start time for the upgrades, in - // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. - AutoUpgradeStartTime string `protobuf:"bytes,1,opt,name=auto_upgrade_start_time,json=autoUpgradeStartTime,proto3" json:"auto_upgrade_start_time,omitempty"` - // [Output only] This field is set when upgrades are about to commence - // with the description of the upgrade. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *AutoUpgradeOptions) Reset() { - *x = AutoUpgradeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AutoUpgradeOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AutoUpgradeOptions) ProtoMessage() {} - -func (x *AutoUpgradeOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AutoUpgradeOptions.ProtoReflect.Descriptor instead. 
-func (*AutoUpgradeOptions) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{53} -} - -func (x *AutoUpgradeOptions) GetAutoUpgradeStartTime() string { - if x != nil { - return x.AutoUpgradeStartTime - } - return "" -} - -func (x *AutoUpgradeOptions) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -// MaintenancePolicy defines the maintenance policy to be used for the cluster. -type MaintenancePolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies the maintenance window in which maintenance may be performed. - Window *MaintenanceWindow `protobuf:"bytes,1,opt,name=window,proto3" json:"window,omitempty"` - // A hash identifying the version of this policy, so that updates to fields of - // the policy won't accidentally undo intermediate changes (and so that users - // of the API unaware of some fields won't accidentally remove other fields). - // Make a `get()` request to the cluster to get the current - // resource version and include it with requests to set the policy. - ResourceVersion string `protobuf:"bytes,3,opt,name=resource_version,json=resourceVersion,proto3" json:"resource_version,omitempty"` -} - -func (x *MaintenancePolicy) Reset() { - *x = MaintenancePolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MaintenancePolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MaintenancePolicy) ProtoMessage() {} - -func (x *MaintenancePolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MaintenancePolicy.ProtoReflect.Descriptor instead. -func (*MaintenancePolicy) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{54} -} - -func (x *MaintenancePolicy) GetWindow() *MaintenanceWindow { - if x != nil { - return x.Window - } - return nil -} - -func (x *MaintenancePolicy) GetResourceVersion() string { - if x != nil { - return x.ResourceVersion - } - return "" -} - -// MaintenanceWindow defines the maintenance window to be used for the cluster. -type MaintenanceWindow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Policy: - // *MaintenanceWindow_DailyMaintenanceWindow - // *MaintenanceWindow_RecurringWindow - Policy isMaintenanceWindow_Policy `protobuf_oneof:"policy"` - // Exceptions to maintenance window. Non-emergency maintenance should not - // occur in these windows. 
- MaintenanceExclusions map[string]*TimeWindow `protobuf:"bytes,4,rep,name=maintenance_exclusions,json=maintenanceExclusions,proto3" json:"maintenance_exclusions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *MaintenanceWindow) Reset() { - *x = MaintenanceWindow{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MaintenanceWindow) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MaintenanceWindow) ProtoMessage() {} - -func (x *MaintenanceWindow) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MaintenanceWindow.ProtoReflect.Descriptor instead. -func (*MaintenanceWindow) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{55} -} - -func (m *MaintenanceWindow) GetPolicy() isMaintenanceWindow_Policy { - if m != nil { - return m.Policy - } - return nil -} - -func (x *MaintenanceWindow) GetDailyMaintenanceWindow() *DailyMaintenanceWindow { - if x, ok := x.GetPolicy().(*MaintenanceWindow_DailyMaintenanceWindow); ok { - return x.DailyMaintenanceWindow - } - return nil -} - -func (x *MaintenanceWindow) GetRecurringWindow() *RecurringTimeWindow { - if x, ok := x.GetPolicy().(*MaintenanceWindow_RecurringWindow); ok { - return x.RecurringWindow - } - return nil -} - -func (x *MaintenanceWindow) GetMaintenanceExclusions() map[string]*TimeWindow { - if x != nil { - return x.MaintenanceExclusions - } - return nil -} - -type isMaintenanceWindow_Policy interface { - isMaintenanceWindow_Policy() -} - -type MaintenanceWindow_DailyMaintenanceWindow struct { - // DailyMaintenanceWindow specifies a daily maintenance operation window. - DailyMaintenanceWindow *DailyMaintenanceWindow `protobuf:"bytes,2,opt,name=daily_maintenance_window,json=dailyMaintenanceWindow,proto3,oneof"` -} - -type MaintenanceWindow_RecurringWindow struct { - // RecurringWindow specifies some number of recurring time periods for - // maintenance to occur. The time windows may be overlapping. If no - // maintenance windows are set, maintenance can occur at any time. - RecurringWindow *RecurringTimeWindow `protobuf:"bytes,3,opt,name=recurring_window,json=recurringWindow,proto3,oneof"` -} - -func (*MaintenanceWindow_DailyMaintenanceWindow) isMaintenanceWindow_Policy() {} - -func (*MaintenanceWindow_RecurringWindow) isMaintenanceWindow_Policy() {} - -// Represents an arbitrary window of time. -type TimeWindow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The time that the window first starts. - StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // The time that the window ends. The end time should take place after the - // start time. 
- EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` -} - -func (x *TimeWindow) Reset() { - *x = TimeWindow{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeWindow) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeWindow) ProtoMessage() {} - -func (x *TimeWindow) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeWindow.ProtoReflect.Descriptor instead. -func (*TimeWindow) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{56} -} - -func (x *TimeWindow) GetStartTime() *timestamppb.Timestamp { - if x != nil { - return x.StartTime - } - return nil -} - -func (x *TimeWindow) GetEndTime() *timestamppb.Timestamp { - if x != nil { - return x.EndTime - } - return nil -} - -// Represents an arbitrary window of time that recurs. -type RecurringTimeWindow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The window of the first recurrence. - Window *TimeWindow `protobuf:"bytes,1,opt,name=window,proto3" json:"window,omitempty"` - // An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how - // this window reccurs. They go on for the span of time between the start and - // end time. - // - // For example, to have something repeat every weekday, you'd use: - // `FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR` - // - // To repeat some window daily (equivalent to the DailyMaintenanceWindow): - // `FREQ=DAILY` - // - // For the first weekend of every month: - // `FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU` - // - // This specifies how frequently the window starts. Eg, if you wanted to have - // a 9-5 UTC-4 window every weekday, you'd use something like: - // ``` - // start time = 2019-01-01T09:00:00-0400 - // end time = 2019-01-01T17:00:00-0400 - // recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - // ``` - // - // Windows can span multiple days. Eg, to make the window encompass every - // weekend from midnight Saturday till the last minute of Sunday UTC: - // ``` - // start time = 2019-01-05T00:00:00Z - // end time = 2019-01-07T23:59:00Z - // recurrence = FREQ=WEEKLY;BYDAY=SA - // ``` - // - // Note the start and end time's specific dates are largely arbitrary except - // to specify duration of the window and when it first starts. - // The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported. 
- Recurrence string `protobuf:"bytes,2,opt,name=recurrence,proto3" json:"recurrence,omitempty"` -} - -func (x *RecurringTimeWindow) Reset() { - *x = RecurringTimeWindow{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RecurringTimeWindow) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecurringTimeWindow) ProtoMessage() {} - -func (x *RecurringTimeWindow) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecurringTimeWindow.ProtoReflect.Descriptor instead. -func (*RecurringTimeWindow) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{57} -} - -func (x *RecurringTimeWindow) GetWindow() *TimeWindow { - if x != nil { - return x.Window - } - return nil -} - -func (x *RecurringTimeWindow) GetRecurrence() string { - if x != nil { - return x.Recurrence - } - return "" -} - -// Time window specified for daily maintenance operations. -type DailyMaintenanceWindow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Time within the maintenance window to start the maintenance operations. - // Time format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) - // format "HH:MM", where HH : [00-23] and MM : [00-59] GMT. - StartTime string `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // [Output only] Duration of the time window, automatically chosen to be - // smallest possible in the given scenario. - // Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) - // format "PTnHnMnS". - Duration string `protobuf:"bytes,3,opt,name=duration,proto3" json:"duration,omitempty"` -} - -func (x *DailyMaintenanceWindow) Reset() { - *x = DailyMaintenanceWindow{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DailyMaintenanceWindow) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DailyMaintenanceWindow) ProtoMessage() {} - -func (x *DailyMaintenanceWindow) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DailyMaintenanceWindow.ProtoReflect.Descriptor instead. -func (*DailyMaintenanceWindow) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{58} -} - -func (x *DailyMaintenanceWindow) GetStartTime() string { - if x != nil { - return x.StartTime - } - return "" -} - -func (x *DailyMaintenanceWindow) GetDuration() string { - if x != nil { - return x.Duration - } - return "" -} - -// SetNodePoolManagementRequest sets the node management properties of a node -// pool. 
-type SetNodePoolManagementRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to update. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool to update. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // Required. NodeManagement configuration for the node pool. - Management *NodeManagement `protobuf:"bytes,5,opt,name=management,proto3" json:"management,omitempty"` - // The name (project, location, cluster, node pool id) of the node pool to set - // management properties. Specified in the format - // `projects/*/locations/*/clusters/*/nodePools/*`. - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetNodePoolManagementRequest) Reset() { - *x = SetNodePoolManagementRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetNodePoolManagementRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetNodePoolManagementRequest) ProtoMessage() {} - -func (x *SetNodePoolManagementRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetNodePoolManagementRequest.ProtoReflect.Descriptor instead. -func (*SetNodePoolManagementRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{59} -} - -// Deprecated: Do not use. -func (x *SetNodePoolManagementRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolManagementRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolManagementRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. 
-func (x *SetNodePoolManagementRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *SetNodePoolManagementRequest) GetManagement() *NodeManagement { - if x != nil { - return x.Management - } - return nil -} - -func (x *SetNodePoolManagementRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetNodePoolSizeRequest sets the size a node -// pool. -type SetNodePoolSizeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to update. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool to update. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // Required. The desired node count for the pool. - NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3" json:"node_count,omitempty"` - // The name (project, location, cluster, node pool id) of the node pool to set - // size. - // Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`. - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetNodePoolSizeRequest) Reset() { - *x = SetNodePoolSizeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetNodePoolSizeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetNodePoolSizeRequest) ProtoMessage() {} - -func (x *SetNodePoolSizeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetNodePoolSizeRequest.ProtoReflect.Descriptor instead. -func (*SetNodePoolSizeRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{60} -} - -// Deprecated: Do not use. -func (x *SetNodePoolSizeRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolSizeRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. 
-func (x *SetNodePoolSizeRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNodePoolSizeRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *SetNodePoolSizeRequest) GetNodeCount() int32 { - if x != nil { - return x.NodeCount - } - return 0 -} - -func (x *SetNodePoolSizeRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed -// NodePool upgrade. This will be an no-op if the last upgrade successfully -// completed. -type RollbackNodePoolUpgradeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to rollback. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Deprecated. The name of the node pool to rollback. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` - // The name (project, location, cluster, node pool id) of the node poll to - // rollback upgrade. - // Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *RollbackNodePoolUpgradeRequest) Reset() { - *x = RollbackNodePoolUpgradeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RollbackNodePoolUpgradeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RollbackNodePoolUpgradeRequest) ProtoMessage() {} - -func (x *RollbackNodePoolUpgradeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RollbackNodePoolUpgradeRequest.ProtoReflect.Descriptor instead. -func (*RollbackNodePoolUpgradeRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{61} -} - -// Deprecated: Do not use. -func (x *RollbackNodePoolUpgradeRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. 
-func (x *RollbackNodePoolUpgradeRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *RollbackNodePoolUpgradeRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -// Deprecated: Do not use. -func (x *RollbackNodePoolUpgradeRequest) GetNodePoolId() string { - if x != nil { - return x.NodePoolId - } - return "" -} - -func (x *RollbackNodePoolUpgradeRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// ListNodePoolsResponse is the result of ListNodePoolsRequest. -type ListNodePoolsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of node pools for a cluster. - NodePools []*NodePool `protobuf:"bytes,1,rep,name=node_pools,json=nodePools,proto3" json:"node_pools,omitempty"` -} - -func (x *ListNodePoolsResponse) Reset() { - *x = ListNodePoolsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNodePoolsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNodePoolsResponse) ProtoMessage() {} - -func (x *ListNodePoolsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNodePoolsResponse.ProtoReflect.Descriptor instead. -func (*ListNodePoolsResponse) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{62} -} - -func (x *ListNodePoolsResponse) GetNodePools() []*NodePool { - if x != nil { - return x.NodePools - } - return nil -} - -// ClusterAutoscaling contains global, per-cluster information -// required by Cluster Autoscaler to automatically adjust -// the size of the cluster and create/delete -// node pools based on the current needs. -type ClusterAutoscaling struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Enables automatic node pool creation and deletion. - EnableNodeAutoprovisioning bool `protobuf:"varint,1,opt,name=enable_node_autoprovisioning,json=enableNodeAutoprovisioning,proto3" json:"enable_node_autoprovisioning,omitempty"` - // Contains global constraints regarding minimum and maximum - // amount of resources in the cluster. - ResourceLimits []*ResourceLimit `protobuf:"bytes,2,rep,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"` - // AutoprovisioningNodePoolDefaults contains defaults for a node pool - // created by NAP. - AutoprovisioningNodePoolDefaults *AutoprovisioningNodePoolDefaults `protobuf:"bytes,4,opt,name=autoprovisioning_node_pool_defaults,json=autoprovisioningNodePoolDefaults,proto3" json:"autoprovisioning_node_pool_defaults,omitempty"` - // The list of Google Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) in which the - // NodePool's nodes can be created by NAP. 
- AutoprovisioningLocations []string `protobuf:"bytes,5,rep,name=autoprovisioning_locations,json=autoprovisioningLocations,proto3" json:"autoprovisioning_locations,omitempty"` -} - -func (x *ClusterAutoscaling) Reset() { - *x = ClusterAutoscaling{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClusterAutoscaling) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClusterAutoscaling) ProtoMessage() {} - -func (x *ClusterAutoscaling) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClusterAutoscaling.ProtoReflect.Descriptor instead. -func (*ClusterAutoscaling) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{63} -} - -func (x *ClusterAutoscaling) GetEnableNodeAutoprovisioning() bool { - if x != nil { - return x.EnableNodeAutoprovisioning - } - return false -} - -func (x *ClusterAutoscaling) GetResourceLimits() []*ResourceLimit { - if x != nil { - return x.ResourceLimits - } - return nil -} - -func (x *ClusterAutoscaling) GetAutoprovisioningNodePoolDefaults() *AutoprovisioningNodePoolDefaults { - if x != nil { - return x.AutoprovisioningNodePoolDefaults - } - return nil -} - -func (x *ClusterAutoscaling) GetAutoprovisioningLocations() []string { - if x != nil { - return x.AutoprovisioningLocations - } - return nil -} - -// AutoprovisioningNodePoolDefaults contains defaults for a node pool created -// by NAP. -type AutoprovisioningNodePoolDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Scopes that are used by NAP when creating node pools. - OauthScopes []string `protobuf:"bytes,1,rep,name=oauth_scopes,json=oauthScopes,proto3" json:"oauth_scopes,omitempty"` - // The Google Cloud Platform Service Account to be used by the node VMs. - ServiceAccount string `protobuf:"bytes,2,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"` - // Specifies the upgrade settings for NAP created node pools - UpgradeSettings *NodePool_UpgradeSettings `protobuf:"bytes,3,opt,name=upgrade_settings,json=upgradeSettings,proto3" json:"upgrade_settings,omitempty"` - // Specifies the node management options for NAP created node-pools. - Management *NodeManagement `protobuf:"bytes,4,opt,name=management,proto3" json:"management,omitempty"` - // Minimum CPU platform to be used for NAP created node pools. - // The instance may be scheduled on the specified or newer CPU platform. - // Applicable values are the friendly names of CPU platforms, such as - // minCpuPlatform: Intel Haswell or - // minCpuPlatform: Intel Sandy Bridge. For more - // information, read [how to specify min CPU - // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) - // To unset the min cpu platform field pass "automatic" - // as field value. - MinCpuPlatform string `protobuf:"bytes,5,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"` - // Size of the disk attached to each node, specified in GB. - // The smallest allowed disk size is 10GB. 
- // - // If unspecified, the default disk size is 100GB. - DiskSizeGb int32 `protobuf:"varint,6,opt,name=disk_size_gb,json=diskSizeGb,proto3" json:"disk_size_gb,omitempty"` - // Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or - // 'pd-balanced') - // - // If unspecified, the default disk type is 'pd-standard' - DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - // Shielded Instance options. - ShieldedInstanceConfig *ShieldedInstanceConfig `protobuf:"bytes,8,opt,name=shielded_instance_config,json=shieldedInstanceConfig,proto3" json:"shielded_instance_config,omitempty"` - // The Customer Managed Encryption Key used to encrypt the boot disk attached - // to each node in the node pool. This should be of the form - // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. - // For more information about protecting resources with Cloud KMS Keys please - // see: - // https://cloud.google.com/compute/docs/disks/customer-managed-encryption - BootDiskKmsKey string `protobuf:"bytes,9,opt,name=boot_disk_kms_key,json=bootDiskKmsKey,proto3" json:"boot_disk_kms_key,omitempty"` -} - -func (x *AutoprovisioningNodePoolDefaults) Reset() { - *x = AutoprovisioningNodePoolDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AutoprovisioningNodePoolDefaults) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AutoprovisioningNodePoolDefaults) ProtoMessage() {} - -func (x *AutoprovisioningNodePoolDefaults) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AutoprovisioningNodePoolDefaults.ProtoReflect.Descriptor instead. 
-func (*AutoprovisioningNodePoolDefaults) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{64} -} - -func (x *AutoprovisioningNodePoolDefaults) GetOauthScopes() []string { - if x != nil { - return x.OauthScopes - } - return nil -} - -func (x *AutoprovisioningNodePoolDefaults) GetServiceAccount() string { - if x != nil { - return x.ServiceAccount - } - return "" -} - -func (x *AutoprovisioningNodePoolDefaults) GetUpgradeSettings() *NodePool_UpgradeSettings { - if x != nil { - return x.UpgradeSettings - } - return nil -} - -func (x *AutoprovisioningNodePoolDefaults) GetManagement() *NodeManagement { - if x != nil { - return x.Management - } - return nil -} - -func (x *AutoprovisioningNodePoolDefaults) GetMinCpuPlatform() string { - if x != nil { - return x.MinCpuPlatform - } - return "" -} - -func (x *AutoprovisioningNodePoolDefaults) GetDiskSizeGb() int32 { - if x != nil { - return x.DiskSizeGb - } - return 0 -} - -func (x *AutoprovisioningNodePoolDefaults) GetDiskType() string { - if x != nil { - return x.DiskType - } - return "" -} - -func (x *AutoprovisioningNodePoolDefaults) GetShieldedInstanceConfig() *ShieldedInstanceConfig { - if x != nil { - return x.ShieldedInstanceConfig - } - return nil -} - -func (x *AutoprovisioningNodePoolDefaults) GetBootDiskKmsKey() string { - if x != nil { - return x.BootDiskKmsKey - } - return "" -} - -// Contains information about amount of some resource in the cluster. -// For memory, value should be in GB. -type ResourceLimit struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Resource name "cpu", "memory" or gpu-specific string. - ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` - // Minimum amount of the resource in the cluster. - Minimum int64 `protobuf:"varint,2,opt,name=minimum,proto3" json:"minimum,omitempty"` - // Maximum amount of the resource in the cluster. - Maximum int64 `protobuf:"varint,3,opt,name=maximum,proto3" json:"maximum,omitempty"` -} - -func (x *ResourceLimit) Reset() { - *x = ResourceLimit{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceLimit) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceLimit) ProtoMessage() {} - -func (x *ResourceLimit) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceLimit.ProtoReflect.Descriptor instead. -func (*ResourceLimit) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{65} -} - -func (x *ResourceLimit) GetResourceType() string { - if x != nil { - return x.ResourceType - } - return "" -} - -func (x *ResourceLimit) GetMinimum() int64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *ResourceLimit) GetMaximum() int64 { - if x != nil { - return x.Maximum - } - return 0 -} - -// NodePoolAutoscaling contains information required by cluster autoscaler to -// adjust the size of the node pool to the current cluster usage. 
-type NodePoolAutoscaling struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Is autoscaling enabled for this node pool. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - // Minimum number of nodes in the NodePool. Must be >= 1 and <= - // max_node_count. - MinNodeCount int32 `protobuf:"varint,2,opt,name=min_node_count,json=minNodeCount,proto3" json:"min_node_count,omitempty"` - // Maximum number of nodes in the NodePool. Must be >= min_node_count. There - // has to enough quota to scale up the cluster. - MaxNodeCount int32 `protobuf:"varint,3,opt,name=max_node_count,json=maxNodeCount,proto3" json:"max_node_count,omitempty"` - // Can this node pool be deleted automatically. - Autoprovisioned bool `protobuf:"varint,4,opt,name=autoprovisioned,proto3" json:"autoprovisioned,omitempty"` -} - -func (x *NodePoolAutoscaling) Reset() { - *x = NodePoolAutoscaling{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodePoolAutoscaling) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodePoolAutoscaling) ProtoMessage() {} - -func (x *NodePoolAutoscaling) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodePoolAutoscaling.ProtoReflect.Descriptor instead. -func (*NodePoolAutoscaling) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{66} -} - -func (x *NodePoolAutoscaling) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *NodePoolAutoscaling) GetMinNodeCount() int32 { - if x != nil { - return x.MinNodeCount - } - return 0 -} - -func (x *NodePoolAutoscaling) GetMaxNodeCount() int32 { - if x != nil { - return x.MaxNodeCount - } - return 0 -} - -func (x *NodePoolAutoscaling) GetAutoprovisioned() bool { - if x != nil { - return x.Autoprovisioned - } - return false -} - -// SetLabelsRequest sets the Google Cloud Platform labels on a Google Container -// Engine cluster, which will in turn set them for Google Compute Engine -// resources used by that cluster -type SetLabelsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. 
- ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The labels to set for that cluster. - ResourceLabels map[string]string `protobuf:"bytes,4,rep,name=resource_labels,json=resourceLabels,proto3" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Required. The fingerprint of the previous set of labels for this resource, - // used to detect conflicts. The fingerprint is initially generated by - // Kubernetes Engine and changes after every request to modify or update - // labels. You must always provide an up-to-date fingerprint hash when - // updating or changing labels. Make a `get()` request to the - // resource to get the latest fingerprint. - LabelFingerprint string `protobuf:"bytes,5,opt,name=label_fingerprint,json=labelFingerprint,proto3" json:"label_fingerprint,omitempty"` - // The name (project, location, cluster id) of the cluster to set labels. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetLabelsRequest) Reset() { - *x = SetLabelsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetLabelsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetLabelsRequest) ProtoMessage() {} - -func (x *SetLabelsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetLabelsRequest.ProtoReflect.Descriptor instead. -func (*SetLabelsRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{67} -} - -// Deprecated: Do not use. -func (x *SetLabelsRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLabelsRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLabelsRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetLabelsRequest) GetResourceLabels() map[string]string { - if x != nil { - return x.ResourceLabels - } - return nil -} - -func (x *SetLabelsRequest) GetLabelFingerprint() string { - if x != nil { - return x.LabelFingerprint - } - return "" -} - -func (x *SetLabelsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for -// a cluster. -type SetLegacyAbacRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. 
The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster to update. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. Whether ABAC authorization will be enabled in the cluster. - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - // The name (project, location, cluster id) of the cluster to set legacy abac. - // Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetLegacyAbacRequest) Reset() { - *x = SetLegacyAbacRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetLegacyAbacRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetLegacyAbacRequest) ProtoMessage() {} - -func (x *SetLegacyAbacRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetLegacyAbacRequest.ProtoReflect.Descriptor instead. -func (*SetLegacyAbacRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{68} -} - -// Deprecated: Do not use. -func (x *SetLegacyAbacRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLegacyAbacRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetLegacyAbacRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetLegacyAbacRequest) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *SetLegacyAbacRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// StartIPRotationRequest creates a new IP for the cluster and then performs -// a node upgrade on each node pool to point to the new IP. -type StartIPRotationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. 
- Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // The name (project, location, cluster id) of the cluster to start IP - // rotation. Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - // Whether to rotate credentials during IP rotation. - RotateCredentials bool `protobuf:"varint,7,opt,name=rotate_credentials,json=rotateCredentials,proto3" json:"rotate_credentials,omitempty"` -} - -func (x *StartIPRotationRequest) Reset() { - *x = StartIPRotationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartIPRotationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartIPRotationRequest) ProtoMessage() {} - -func (x *StartIPRotationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartIPRotationRequest.ProtoReflect.Descriptor instead. -func (*StartIPRotationRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{69} -} - -// Deprecated: Do not use. -func (x *StartIPRotationRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *StartIPRotationRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *StartIPRotationRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *StartIPRotationRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *StartIPRotationRequest) GetRotateCredentials() bool { - if x != nil { - return x.RotateCredentials - } - return false -} - -// CompleteIPRotationRequest moves the cluster master back into single-IP mode. -type CompleteIPRotationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. 
- ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // The name (project, location, cluster id) of the cluster to complete IP - // rotation. Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *CompleteIPRotationRequest) Reset() { - *x = CompleteIPRotationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CompleteIPRotationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CompleteIPRotationRequest) ProtoMessage() {} - -func (x *CompleteIPRotationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CompleteIPRotationRequest.ProtoReflect.Descriptor instead. -func (*CompleteIPRotationRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{70} -} - -// Deprecated: Do not use. -func (x *CompleteIPRotationRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *CompleteIPRotationRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *CompleteIPRotationRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *CompleteIPRotationRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// AcceleratorConfig represents a Hardware Accelerator request. -type AcceleratorConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The number of the accelerator cards exposed to an instance. - AcceleratorCount int64 `protobuf:"varint,1,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"` - // The accelerator type resource name. List of supported accelerators - // [here](https://cloud.google.com/compute/docs/gpus) - AcceleratorType string `protobuf:"bytes,2,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"` -} - -func (x *AcceleratorConfig) Reset() { - *x = AcceleratorConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AcceleratorConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AcceleratorConfig) ProtoMessage() {} - -func (x *AcceleratorConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AcceleratorConfig.ProtoReflect.Descriptor instead. 
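Illustrative note (not part of the vendored file): the SetLabelsRequest comments above describe a read-modify-write flow — fetch the cluster, reuse its current label fingerprint, then send the replacement label set. A minimal sketch of that flow, assuming the cloud.google.com/go/container/apiv1 ClusterManagerClient and aliasing this generated package as containerpb; the project, location, and cluster names are hypothetical.

package main

import (
	"context"
	"log"

	container "cloud.google.com/go/container/apiv1"
	containerpb "google.golang.org/genproto/googleapis/container/v1"
)

func main() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Hypothetical cluster resource name in the `projects/*/locations/*/clusters/*` format.
	name := "projects/my-project/locations/us-central1/clusters/my-cluster"

	// Read the cluster first so the request carries an up-to-date label fingerprint.
	cluster, err := c.GetCluster(ctx, &containerpb.GetClusterRequest{Name: name})
	if err != nil {
		log.Fatal(err)
	}

	// Replace the label set; the fingerprint lets the API detect concurrent label edits.
	op, err := c.SetLabels(ctx, &containerpb.SetLabelsRequest{
		Name:             name,
		ResourceLabels:   map[string]string{"team": "platform"},
		LabelFingerprint: cluster.GetLabelFingerprint(),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started operation %s", op.GetName())
}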
-func (*AcceleratorConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{71} -} - -func (x *AcceleratorConfig) GetAcceleratorCount() int64 { - if x != nil { - return x.AcceleratorCount - } - return 0 -} - -func (x *AcceleratorConfig) GetAcceleratorType() string { - if x != nil { - return x.AcceleratorType - } - return "" -} - -// WorkloadMetadataConfig defines the metadata configuration to expose to -// workloads on the node pool. -type WorkloadMetadataConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Mode is the configuration for how to expose metadata to workloads running - // on the node pool. - Mode WorkloadMetadataConfig_Mode `protobuf:"varint,2,opt,name=mode,proto3,enum=google.container.v1.WorkloadMetadataConfig_Mode" json:"mode,omitempty"` -} - -func (x *WorkloadMetadataConfig) Reset() { - *x = WorkloadMetadataConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WorkloadMetadataConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkloadMetadataConfig) ProtoMessage() {} - -func (x *WorkloadMetadataConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkloadMetadataConfig.ProtoReflect.Descriptor instead. -func (*WorkloadMetadataConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{72} -} - -func (x *WorkloadMetadataConfig) GetMode() WorkloadMetadataConfig_Mode { - if x != nil { - return x.Mode - } - return WorkloadMetadataConfig_MODE_UNSPECIFIED -} - -// SetNetworkPolicyRequest enables/disables network policy for a cluster. -type SetNetworkPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated. The Google Developers Console [project ID or project - // number](https://developers.google.com/console/help/new/#projectnumber). - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Deprecated. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. This field has been deprecated and replaced by the name - // field. - // - // Deprecated: Do not use. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Deprecated. The name of the cluster. - // This field has been deprecated and replaced by the name field. - // - // Deprecated: Do not use. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. Configuration options for the NetworkPolicy feature. - NetworkPolicy *NetworkPolicy `protobuf:"bytes,4,opt,name=network_policy,json=networkPolicy,proto3" json:"network_policy,omitempty"` - // The name (project, location, cluster id) of the cluster to set networking - // policy. 
Specified in the format `projects/*/locations/*/clusters/*`. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetNetworkPolicyRequest) Reset() { - *x = SetNetworkPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetNetworkPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetNetworkPolicyRequest) ProtoMessage() {} - -func (x *SetNetworkPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetNetworkPolicyRequest.ProtoReflect.Descriptor instead. -func (*SetNetworkPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{73} -} - -// Deprecated: Do not use. -func (x *SetNetworkPolicyRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNetworkPolicyRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -// Deprecated: Do not use. -func (x *SetNetworkPolicyRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetNetworkPolicyRequest) GetNetworkPolicy() *NetworkPolicy { - if x != nil { - return x.NetworkPolicy - } - return nil -} - -func (x *SetNetworkPolicyRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// SetMaintenancePolicyRequest sets the maintenance policy for a cluster. -type SetMaintenancePolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The Google Developers Console [project ID or project - // number](https://support.google.com/cloud/answer/6158840). - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // Required. The name of the Google Compute Engine - // [zone](https://cloud.google.com/compute/docs/zones#available) in which the - // cluster resides. - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // Required. The name of the cluster to update. - ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Required. The maintenance policy to be set for the cluster. An empty field - // clears the existing maintenance policy. - MaintenancePolicy *MaintenancePolicy `protobuf:"bytes,4,opt,name=maintenance_policy,json=maintenancePolicy,proto3" json:"maintenance_policy,omitempty"` - // The name (project, location, cluster id) of the cluster to set maintenance - // policy. - // Specified in the format `projects/*/locations/*/clusters/*`. 
- Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SetMaintenancePolicyRequest) Reset() { - *x = SetMaintenancePolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetMaintenancePolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetMaintenancePolicyRequest) ProtoMessage() {} - -func (x *SetMaintenancePolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetMaintenancePolicyRequest.ProtoReflect.Descriptor instead. -func (*SetMaintenancePolicyRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{74} -} - -func (x *SetMaintenancePolicyRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -func (x *SetMaintenancePolicyRequest) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *SetMaintenancePolicyRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *SetMaintenancePolicyRequest) GetMaintenancePolicy() *MaintenancePolicy { - if x != nil { - return x.MaintenancePolicy - } - return nil -} - -func (x *SetMaintenancePolicyRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// StatusCondition describes why a cluster or a node pool has a certain status -// (e.g., ERROR or DEGRADED). -type StatusCondition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Machine-friendly representation of the condition - Code StatusCondition_Code `protobuf:"varint,1,opt,name=code,proto3,enum=google.container.v1.StatusCondition_Code" json:"code,omitempty"` - // Human-friendly representation of the condition - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *StatusCondition) Reset() { - *x = StatusCondition{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatusCondition) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusCondition) ProtoMessage() {} - -func (x *StatusCondition) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusCondition.ProtoReflect.Descriptor instead. 
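Illustrative note (not part of the vendored file): per the SetMaintenancePolicyRequest comments above, an empty maintenance_policy clears any existing policy. A hedged sketch of both cases, assuming the client and imports from the earlier sketch; the helper name is hypothetical.

// setOrClearMaintenancePolicy sends the given policy; passing nil clears the
// cluster's existing maintenance policy, per the field comment above.
func setOrClearMaintenancePolicy(ctx context.Context, c *container.ClusterManagerClient, name string, policy *containerpb.MaintenancePolicy) error {
	_, err := c.SetMaintenancePolicy(ctx, &containerpb.SetMaintenancePolicyRequest{
		Name:              name,   // `projects/*/locations/*/clusters/*`
		MaintenancePolicy: policy, // nil/empty clears the existing policy
	})
	return err
}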
-func (*StatusCondition) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{75} -} - -func (x *StatusCondition) GetCode() StatusCondition_Code { - if x != nil { - return x.Code - } - return StatusCondition_UNKNOWN -} - -func (x *StatusCondition) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -// NetworkConfig reports the relative names of network & subnetwork. -type NetworkConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Output only. The relative name of the Google Compute Engine - // [network][google.container.v1.NetworkConfig.network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) - // to which the cluster is connected. Example: - // projects/my-project/global/networks/my-network - Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"` - // Output only. The relative name of the Google Compute Engine - // [subnetwork](https://cloud.google.com/compute/docs/vpc) to which the - // cluster is connected. Example: - // projects/my-project/regions/us-central1/subnetworks/my-subnet - Subnetwork string `protobuf:"bytes,2,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"` - // Whether Intra-node visibility is enabled for this cluster. - // This makes same node pod to pod traffic visible for VPC network. - EnableIntraNodeVisibility bool `protobuf:"varint,5,opt,name=enable_intra_node_visibility,json=enableIntraNodeVisibility,proto3" json:"enable_intra_node_visibility,omitempty"` - // Whether the cluster disables default in-node sNAT rules. In-node sNAT rules - // will be disabled when default_snat_status is disabled. When disabled is set - // to false, default IP masquerade rules will be applied to the nodes to - // prevent sNAT on cluster internal traffic. - DefaultSnatStatus *DefaultSnatStatus `protobuf:"bytes,7,opt,name=default_snat_status,json=defaultSnatStatus,proto3" json:"default_snat_status,omitempty"` -} - -func (x *NetworkConfig) Reset() { - *x = NetworkConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NetworkConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NetworkConfig) ProtoMessage() {} - -func (x *NetworkConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NetworkConfig.ProtoReflect.Descriptor instead. 
-func (*NetworkConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{76} -} - -func (x *NetworkConfig) GetNetwork() string { - if x != nil { - return x.Network - } - return "" -} - -func (x *NetworkConfig) GetSubnetwork() string { - if x != nil { - return x.Subnetwork - } - return "" -} - -func (x *NetworkConfig) GetEnableIntraNodeVisibility() bool { - if x != nil { - return x.EnableIntraNodeVisibility - } - return false -} - -func (x *NetworkConfig) GetDefaultSnatStatus() *DefaultSnatStatus { - if x != nil { - return x.DefaultSnatStatus - } - return nil -} - -// GetOpenIDConfigRequest gets the OIDC discovery document for the -// cluster. See the OpenID Connect Discovery 1.0 specification for details. -type GetOpenIDConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The cluster (project, location, cluster id) to get the discovery document - // for. Specified in the format `projects/*/locations/*/clusters/*`. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *GetOpenIDConfigRequest) Reset() { - *x = GetOpenIDConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpenIDConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpenIDConfigRequest) ProtoMessage() {} - -func (x *GetOpenIDConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpenIDConfigRequest.ProtoReflect.Descriptor instead. -func (*GetOpenIDConfigRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{77} -} - -func (x *GetOpenIDConfigRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// GetOpenIDConfigResponse is an OIDC discovery document for the cluster. -// See the OpenID Connect Discovery 1.0 specification for details. -type GetOpenIDConfigResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // OIDC Issuer. - Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"` - // JSON Web Key uri. - JwksUri string `protobuf:"bytes,2,opt,name=jwks_uri,json=jwksUri,proto3" json:"jwks_uri,omitempty"` - // Supported response types. - ResponseTypesSupported []string `protobuf:"bytes,3,rep,name=response_types_supported,json=responseTypesSupported,proto3" json:"response_types_supported,omitempty"` - // Supported subject types. - SubjectTypesSupported []string `protobuf:"bytes,4,rep,name=subject_types_supported,json=subjectTypesSupported,proto3" json:"subject_types_supported,omitempty"` - // supported ID Token signing Algorithms. - IdTokenSigningAlgValuesSupported []string `protobuf:"bytes,5,rep,name=id_token_signing_alg_values_supported,json=idTokenSigningAlgValuesSupported,proto3" json:"id_token_signing_alg_values_supported,omitempty"` - // Supported claims. 
- ClaimsSupported []string `protobuf:"bytes,6,rep,name=claims_supported,json=claimsSupported,proto3" json:"claims_supported,omitempty"` - // Supported grant types. - GrantTypes []string `protobuf:"bytes,7,rep,name=grant_types,json=grantTypes,proto3" json:"grant_types,omitempty"` -} - -func (x *GetOpenIDConfigResponse) Reset() { - *x = GetOpenIDConfigResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[78] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpenIDConfigResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpenIDConfigResponse) ProtoMessage() {} - -func (x *GetOpenIDConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[78] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpenIDConfigResponse.ProtoReflect.Descriptor instead. -func (*GetOpenIDConfigResponse) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{78} -} - -func (x *GetOpenIDConfigResponse) GetIssuer() string { - if x != nil { - return x.Issuer - } - return "" -} - -func (x *GetOpenIDConfigResponse) GetJwksUri() string { - if x != nil { - return x.JwksUri - } - return "" -} - -func (x *GetOpenIDConfigResponse) GetResponseTypesSupported() []string { - if x != nil { - return x.ResponseTypesSupported - } - return nil -} - -func (x *GetOpenIDConfigResponse) GetSubjectTypesSupported() []string { - if x != nil { - return x.SubjectTypesSupported - } - return nil -} - -func (x *GetOpenIDConfigResponse) GetIdTokenSigningAlgValuesSupported() []string { - if x != nil { - return x.IdTokenSigningAlgValuesSupported - } - return nil -} - -func (x *GetOpenIDConfigResponse) GetClaimsSupported() []string { - if x != nil { - return x.ClaimsSupported - } - return nil -} - -func (x *GetOpenIDConfigResponse) GetGrantTypes() []string { - if x != nil { - return x.GrantTypes - } - return nil -} - -// GetJSONWebKeysRequest gets the public component of the keys used by the -// cluster to sign token requests. This will be the jwks_uri for the discover -// document returned by getOpenIDConfig. See the OpenID Connect -// Discovery 1.0 specification for details. -type GetJSONWebKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The cluster (project, location, cluster id) to get keys for. Specified in - // the format `projects/*/locations/*/clusters/*`. 
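Illustrative note (not part of the vendored file): GetOpenIDConfigRequest/Response and GetJSONWebKeysRequest above model standard OIDC discovery for a cluster — the discovery document advertises a jwks_uri, and GetJSONWebKeys serves the corresponding public signing keys. A rough sketch of reading both, assuming the generated ClusterManagerClient exposes these RPCs and reusing the imports from the first sketch plus "log"; the helper name is hypothetical.

func printClusterOIDCInfo(ctx context.Context, c *container.ClusterManagerClient, clusterName string) error {
	// Fetch the OIDC discovery document for the cluster.
	cfg, err := c.GetOpenIDConfig(ctx, &containerpb.GetOpenIDConfigRequest{Parent: clusterName})
	if err != nil {
		return err
	}
	log.Printf("issuer=%s jwks_uri=%s", cfg.GetIssuer(), cfg.GetJwksUri())

	// Fetch the signing keys referenced by the discovery document.
	keys, err := c.GetJSONWebKeys(ctx, &containerpb.GetJSONWebKeysRequest{Parent: clusterName})
	if err != nil {
		return err
	}
	for _, k := range keys.GetKeys() {
		log.Printf("kid=%s kty=%s alg=%s", k.GetKid(), k.GetKty(), k.GetAlg())
	}
	return nil
}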
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` -} - -func (x *GetJSONWebKeysRequest) Reset() { - *x = GetJSONWebKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[79] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetJSONWebKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetJSONWebKeysRequest) ProtoMessage() {} - -func (x *GetJSONWebKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[79] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetJSONWebKeysRequest.ProtoReflect.Descriptor instead. -func (*GetJSONWebKeysRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{79} -} - -func (x *GetJSONWebKeysRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -// Jwk is a JSON Web Key as specified in RFC 7517 -type Jwk struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key Type. - Kty string `protobuf:"bytes,1,opt,name=kty,proto3" json:"kty,omitempty"` - // Algorithm. - Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"` - // Permitted uses for the public keys. - Use string `protobuf:"bytes,3,opt,name=use,proto3" json:"use,omitempty"` - // Key ID. - Kid string `protobuf:"bytes,4,opt,name=kid,proto3" json:"kid,omitempty"` - // Used for RSA keys. - N string `protobuf:"bytes,5,opt,name=n,proto3" json:"n,omitempty"` - // Used for RSA keys. - E string `protobuf:"bytes,6,opt,name=e,proto3" json:"e,omitempty"` - // Used for ECDSA keys. - X string `protobuf:"bytes,7,opt,name=x,proto3" json:"x,omitempty"` - // Used for ECDSA keys. - Y string `protobuf:"bytes,8,opt,name=y,proto3" json:"y,omitempty"` - // Used for ECDSA keys. - Crv string `protobuf:"bytes,9,opt,name=crv,proto3" json:"crv,omitempty"` -} - -func (x *Jwk) Reset() { - *x = Jwk{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[80] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Jwk) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Jwk) ProtoMessage() {} - -func (x *Jwk) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[80] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Jwk.ProtoReflect.Descriptor instead. 
-func (*Jwk) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{80} -} - -func (x *Jwk) GetKty() string { - if x != nil { - return x.Kty - } - return "" -} - -func (x *Jwk) GetAlg() string { - if x != nil { - return x.Alg - } - return "" -} - -func (x *Jwk) GetUse() string { - if x != nil { - return x.Use - } - return "" -} - -func (x *Jwk) GetKid() string { - if x != nil { - return x.Kid - } - return "" -} - -func (x *Jwk) GetN() string { - if x != nil { - return x.N - } - return "" -} - -func (x *Jwk) GetE() string { - if x != nil { - return x.E - } - return "" -} - -func (x *Jwk) GetX() string { - if x != nil { - return x.X - } - return "" -} - -func (x *Jwk) GetY() string { - if x != nil { - return x.Y - } - return "" -} - -func (x *Jwk) GetCrv() string { - if x != nil { - return x.Crv - } - return "" -} - -// GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc 7517 -type GetJSONWebKeysResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The public component of the keys used by the cluster to sign token - // requests. - Keys []*Jwk `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` -} - -func (x *GetJSONWebKeysResponse) Reset() { - *x = GetJSONWebKeysResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[81] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetJSONWebKeysResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetJSONWebKeysResponse) ProtoMessage() {} - -func (x *GetJSONWebKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[81] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetJSONWebKeysResponse.ProtoReflect.Descriptor instead. -func (*GetJSONWebKeysResponse) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{81} -} - -func (x *GetJSONWebKeysResponse) GetKeys() []*Jwk { - if x != nil { - return x.Keys - } - return nil -} - -// ReleaseChannel indicates which release channel a cluster is -// subscribed to. Release channels are arranged in order of risk. -// -// When a cluster is subscribed to a release channel, Google maintains -// both the master version and the node version. Node auto-upgrade -// defaults to true and cannot be disabled. -type ReleaseChannel struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // channel specifies which release channel the cluster is subscribed to. 
- Channel ReleaseChannel_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=google.container.v1.ReleaseChannel_Channel" json:"channel,omitempty"` -} - -func (x *ReleaseChannel) Reset() { - *x = ReleaseChannel{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[82] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReleaseChannel) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReleaseChannel) ProtoMessage() {} - -func (x *ReleaseChannel) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[82] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReleaseChannel.ProtoReflect.Descriptor instead. -func (*ReleaseChannel) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{82} -} - -func (x *ReleaseChannel) GetChannel() ReleaseChannel_Channel { - if x != nil { - return x.Channel - } - return ReleaseChannel_UNSPECIFIED -} - -// IntraNodeVisibilityConfig contains the desired config of the intra-node -// visibility on this cluster. -type IntraNodeVisibilityConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Enables intra node visibility for this cluster. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *IntraNodeVisibilityConfig) Reset() { - *x = IntraNodeVisibilityConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[83] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IntraNodeVisibilityConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IntraNodeVisibilityConfig) ProtoMessage() {} - -func (x *IntraNodeVisibilityConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[83] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IntraNodeVisibilityConfig.ProtoReflect.Descriptor instead. -func (*IntraNodeVisibilityConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{83} -} - -func (x *IntraNodeVisibilityConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Constraints applied to pods. -type MaxPodsConstraint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Constraint enforced on the max num of pods per node. 
- MaxPodsPerNode int64 `protobuf:"varint,1,opt,name=max_pods_per_node,json=maxPodsPerNode,proto3" json:"max_pods_per_node,omitempty"` -} - -func (x *MaxPodsConstraint) Reset() { - *x = MaxPodsConstraint{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[84] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MaxPodsConstraint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MaxPodsConstraint) ProtoMessage() {} - -func (x *MaxPodsConstraint) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[84] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MaxPodsConstraint.ProtoReflect.Descriptor instead. -func (*MaxPodsConstraint) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{84} -} - -func (x *MaxPodsConstraint) GetMaxPodsPerNode() int64 { - if x != nil { - return x.MaxPodsPerNode - } - return 0 -} - -// Configuration for the use of Kubernetes Service Accounts in GCP IAM -// policies. -type WorkloadIdentityConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The workload pool to attach all Kubernetes service accounts to. - WorkloadPool string `protobuf:"bytes,2,opt,name=workload_pool,json=workloadPool,proto3" json:"workload_pool,omitempty"` -} - -func (x *WorkloadIdentityConfig) Reset() { - *x = WorkloadIdentityConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[85] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WorkloadIdentityConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkloadIdentityConfig) ProtoMessage() {} - -func (x *WorkloadIdentityConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[85] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkloadIdentityConfig.ProtoReflect.Descriptor instead. -func (*WorkloadIdentityConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{85} -} - -func (x *WorkloadIdentityConfig) GetWorkloadPool() string { - if x != nil { - return x.WorkloadPool - } - return "" -} - -// Configuration of etcd encryption. -type DatabaseEncryption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Denotes the state of etcd encryption. - State DatabaseEncryption_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.container.v1.DatabaseEncryption_State" json:"state,omitempty"` - // Name of CloudKMS key to use for the encryption of secrets in etcd. - // Ex. 
projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key - KeyName string `protobuf:"bytes,1,opt,name=key_name,json=keyName,proto3" json:"key_name,omitempty"` -} - -func (x *DatabaseEncryption) Reset() { - *x = DatabaseEncryption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[86] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DatabaseEncryption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DatabaseEncryption) ProtoMessage() {} - -func (x *DatabaseEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[86] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DatabaseEncryption.ProtoReflect.Descriptor instead. -func (*DatabaseEncryption) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{86} -} - -func (x *DatabaseEncryption) GetState() DatabaseEncryption_State { - if x != nil { - return x.State - } - return DatabaseEncryption_UNKNOWN -} - -func (x *DatabaseEncryption) GetKeyName() string { - if x != nil { - return x.KeyName - } - return "" -} - -// ListUsableSubnetworksRequest requests the list of usable subnetworks -// available to a user for creating clusters. -type ListUsableSubnetworksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The parent project where subnetworks are usable. - // Specified in the format `projects/*`. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Filtering currently only supports equality on the networkProjectId and must - // be in the form: "networkProjectId=[PROJECTID]", where `networkProjectId` - // is the project which owns the listed subnetworks. This defaults to the - // parent project ID. - Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // The max number of results per page that should be returned. If the number - // of available results is larger than `page_size`, a `next_page_token` is - // returned which can be used to get the next page of results in subsequent - // requests. Acceptable values are 0 to 500, inclusive. (Default: 500) - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Specifies a page token to use. Set this to the nextPageToken returned by - // previous list requests to get the next page of results. 
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListUsableSubnetworksRequest) Reset() { - *x = ListUsableSubnetworksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[87] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListUsableSubnetworksRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListUsableSubnetworksRequest) ProtoMessage() {} - -func (x *ListUsableSubnetworksRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[87] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListUsableSubnetworksRequest.ProtoReflect.Descriptor instead. -func (*ListUsableSubnetworksRequest) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{87} -} - -func (x *ListUsableSubnetworksRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListUsableSubnetworksRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListUsableSubnetworksRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListUsableSubnetworksRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// ListUsableSubnetworksResponse is the response of -// ListUsableSubnetworksRequest. -type ListUsableSubnetworksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of usable subnetworks in the specified network project. - Subnetworks []*UsableSubnetwork `protobuf:"bytes,1,rep,name=subnetworks,proto3" json:"subnetworks,omitempty"` - // This token allows you to get the next page of results for list requests. - // If the number of results is larger than `page_size`, use the - // `next_page_token` as a value for the query parameter `page_token` in the - // next request. The value will become empty when there are no more pages. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListUsableSubnetworksResponse) Reset() { - *x = ListUsableSubnetworksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[88] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListUsableSubnetworksResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListUsableSubnetworksResponse) ProtoMessage() {} - -func (x *ListUsableSubnetworksResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[88] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListUsableSubnetworksResponse.ProtoReflect.Descriptor instead. 
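Illustrative note (not part of the vendored file): the page_size/page_token comments above describe standard list pagination, which the generated Go client typically surfaces as an iterator. A hedged sketch under that assumption, additionally importing "google.golang.org/api/iterator" and reusing the earlier imports; the helper name and project are hypothetical.

func listUsableSubnetworks(ctx context.Context, c *container.ClusterManagerClient, project string) error {
	it := c.ListUsableSubnetworks(ctx, &containerpb.ListUsableSubnetworksRequest{
		Parent:   "projects/" + project,
		PageSize: 100, // the comment above notes pages are capped at 500 entries
	})
	for {
		sn, err := it.Next()
		if err == iterator.Done {
			break // no more pages
		}
		if err != nil {
			return err
		}
		log.Printf("subnetwork=%s cidr=%s", sn.GetSubnetwork(), sn.GetIpCidrRange())
	}
	return nil
}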
-func (*ListUsableSubnetworksResponse) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{88} -} - -func (x *ListUsableSubnetworksResponse) GetSubnetworks() []*UsableSubnetwork { - if x != nil { - return x.Subnetworks - } - return nil -} - -func (x *ListUsableSubnetworksResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Secondary IP range of a usable subnetwork. -type UsableSubnetworkSecondaryRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name associated with this subnetwork secondary range, used when adding - // an alias IP range to a VM instance. - RangeName string `protobuf:"bytes,1,opt,name=range_name,json=rangeName,proto3" json:"range_name,omitempty"` - // The range of IP addresses belonging to this subnetwork secondary range. - IpCidrRange string `protobuf:"bytes,2,opt,name=ip_cidr_range,json=ipCidrRange,proto3" json:"ip_cidr_range,omitempty"` - // This field is to determine the status of the secondary range programmably. - Status UsableSubnetworkSecondaryRange_Status `protobuf:"varint,3,opt,name=status,proto3,enum=google.container.v1.UsableSubnetworkSecondaryRange_Status" json:"status,omitempty"` -} - -func (x *UsableSubnetworkSecondaryRange) Reset() { - *x = UsableSubnetworkSecondaryRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[89] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UsableSubnetworkSecondaryRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UsableSubnetworkSecondaryRange) ProtoMessage() {} - -func (x *UsableSubnetworkSecondaryRange) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[89] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UsableSubnetworkSecondaryRange.ProtoReflect.Descriptor instead. -func (*UsableSubnetworkSecondaryRange) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{89} -} - -func (x *UsableSubnetworkSecondaryRange) GetRangeName() string { - if x != nil { - return x.RangeName - } - return "" -} - -func (x *UsableSubnetworkSecondaryRange) GetIpCidrRange() string { - if x != nil { - return x.IpCidrRange - } - return "" -} - -func (x *UsableSubnetworkSecondaryRange) GetStatus() UsableSubnetworkSecondaryRange_Status { - if x != nil { - return x.Status - } - return UsableSubnetworkSecondaryRange_UNKNOWN -} - -// UsableSubnetwork resource returns the subnetwork name, its associated network -// and the primary CIDR range. -type UsableSubnetwork struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Subnetwork Name. - // Example: projects/my-project/regions/us-central1/subnetworks/my-subnet - Subnetwork string `protobuf:"bytes,1,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"` - // Network Name. - // Example: projects/my-project/global/networks/my-network - Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` - // The range of internal addresses that are owned by this subnetwork. 
- IpCidrRange string `protobuf:"bytes,3,opt,name=ip_cidr_range,json=ipCidrRange,proto3" json:"ip_cidr_range,omitempty"` - // Secondary IP ranges. - SecondaryIpRanges []*UsableSubnetworkSecondaryRange `protobuf:"bytes,4,rep,name=secondary_ip_ranges,json=secondaryIpRanges,proto3" json:"secondary_ip_ranges,omitempty"` - // A human readable status message representing the reasons for cases where - // the caller cannot use the secondary ranges under the subnet. For example if - // the secondary_ip_ranges is empty due to a permission issue, an insufficient - // permission message will be given by status_message. - StatusMessage string `protobuf:"bytes,5,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` -} - -func (x *UsableSubnetwork) Reset() { - *x = UsableSubnetwork{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[90] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UsableSubnetwork) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UsableSubnetwork) ProtoMessage() {} - -func (x *UsableSubnetwork) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[90] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UsableSubnetwork.ProtoReflect.Descriptor instead. -func (*UsableSubnetwork) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{90} -} - -func (x *UsableSubnetwork) GetSubnetwork() string { - if x != nil { - return x.Subnetwork - } - return "" -} - -func (x *UsableSubnetwork) GetNetwork() string { - if x != nil { - return x.Network - } - return "" -} - -func (x *UsableSubnetwork) GetIpCidrRange() string { - if x != nil { - return x.IpCidrRange - } - return "" -} - -func (x *UsableSubnetwork) GetSecondaryIpRanges() []*UsableSubnetworkSecondaryRange { - if x != nil { - return x.SecondaryIpRanges - } - return nil -} - -func (x *UsableSubnetwork) GetStatusMessage() string { - if x != nil { - return x.StatusMessage - } - return "" -} - -// Configuration for exporting cluster resource usages. -type ResourceUsageExportConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Configuration to use BigQuery as usage export destination. - BigqueryDestination *ResourceUsageExportConfig_BigQueryDestination `protobuf:"bytes,1,opt,name=bigquery_destination,json=bigqueryDestination,proto3" json:"bigquery_destination,omitempty"` - // Whether to enable network egress metering for this cluster. If enabled, a - // daemonset will be created in the cluster to meter network egress traffic. - EnableNetworkEgressMetering bool `protobuf:"varint,2,opt,name=enable_network_egress_metering,json=enableNetworkEgressMetering,proto3" json:"enable_network_egress_metering,omitempty"` - // Configuration to enable resource consumption metering. 
- ConsumptionMeteringConfig *ResourceUsageExportConfig_ConsumptionMeteringConfig `protobuf:"bytes,3,opt,name=consumption_metering_config,json=consumptionMeteringConfig,proto3" json:"consumption_metering_config,omitempty"` -} - -func (x *ResourceUsageExportConfig) Reset() { - *x = ResourceUsageExportConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[91] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceUsageExportConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceUsageExportConfig) ProtoMessage() {} - -func (x *ResourceUsageExportConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[91] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceUsageExportConfig.ProtoReflect.Descriptor instead. -func (*ResourceUsageExportConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{91} -} - -func (x *ResourceUsageExportConfig) GetBigqueryDestination() *ResourceUsageExportConfig_BigQueryDestination { - if x != nil { - return x.BigqueryDestination - } - return nil -} - -func (x *ResourceUsageExportConfig) GetEnableNetworkEgressMetering() bool { - if x != nil { - return x.EnableNetworkEgressMetering - } - return false -} - -func (x *ResourceUsageExportConfig) GetConsumptionMeteringConfig() *ResourceUsageExportConfig_ConsumptionMeteringConfig { - if x != nil { - return x.ConsumptionMeteringConfig - } - return nil -} - -// VerticalPodAutoscaling contains global, per-cluster information -// required by Vertical Pod Autoscaler to automatically adjust -// the resources of pods controlled by it. -type VerticalPodAutoscaling struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Enables vertical pod autoscaling. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *VerticalPodAutoscaling) Reset() { - *x = VerticalPodAutoscaling{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[92] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VerticalPodAutoscaling) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VerticalPodAutoscaling) ProtoMessage() {} - -func (x *VerticalPodAutoscaling) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[92] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VerticalPodAutoscaling.ProtoReflect.Descriptor instead. -func (*VerticalPodAutoscaling) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{92} -} - -func (x *VerticalPodAutoscaling) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// DefaultSnatStatus contains the desired state of whether default sNAT should -// be disabled on the cluster. 
-type DefaultSnatStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Disables cluster default sNAT rules. - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *DefaultSnatStatus) Reset() { - *x = DefaultSnatStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[93] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DefaultSnatStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DefaultSnatStatus) ProtoMessage() {} - -func (x *DefaultSnatStatus) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[93] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DefaultSnatStatus.ProtoReflect.Descriptor instead. -func (*DefaultSnatStatus) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{93} -} - -func (x *DefaultSnatStatus) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// Configuration of Shielded Nodes feature. -type ShieldedNodes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether Shielded Nodes features are enabled on all nodes in this cluster. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *ShieldedNodes) Reset() { - *x = ShieldedNodes{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[94] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShieldedNodes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShieldedNodes) ProtoMessage() {} - -func (x *ShieldedNodes) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[94] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShieldedNodes.ProtoReflect.Descriptor instead. -func (*ShieldedNodes) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{94} -} - -func (x *ShieldedNodes) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// CidrBlock contains an optional name and one CIDR block. -type MasterAuthorizedNetworksConfig_CidrBlock struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // display_name is an optional field for users to identify CIDR blocks. - DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // cidr_block must be specified in CIDR notation. 
- CidrBlock string `protobuf:"bytes,2,opt,name=cidr_block,json=cidrBlock,proto3" json:"cidr_block,omitempty"` -} - -func (x *MasterAuthorizedNetworksConfig_CidrBlock) Reset() { - *x = MasterAuthorizedNetworksConfig_CidrBlock{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[97] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MasterAuthorizedNetworksConfig_CidrBlock) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MasterAuthorizedNetworksConfig_CidrBlock) ProtoMessage() {} - -func (x *MasterAuthorizedNetworksConfig_CidrBlock) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[97] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MasterAuthorizedNetworksConfig_CidrBlock.ProtoReflect.Descriptor instead. -func (*MasterAuthorizedNetworksConfig_CidrBlock) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{18, 0} -} - -func (x *MasterAuthorizedNetworksConfig_CidrBlock) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *MasterAuthorizedNetworksConfig_CidrBlock) GetCidrBlock() string { - if x != nil { - return x.CidrBlock - } - return "" -} - -// Progress metric is (string, int|float|string) pair. -type OperationProgress_Metric struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Metric name, e.g., "nodes total", "percent done". - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Strictly one of the values is required. - // - // Types that are assignable to Value: - // *OperationProgress_Metric_IntValue - // *OperationProgress_Metric_DoubleValue - // *OperationProgress_Metric_StringValue - Value isOperationProgress_Metric_Value `protobuf_oneof:"value"` -} - -func (x *OperationProgress_Metric) Reset() { - *x = OperationProgress_Metric{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[99] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OperationProgress_Metric) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OperationProgress_Metric) ProtoMessage() {} - -func (x *OperationProgress_Metric) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[99] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OperationProgress_Metric.ProtoReflect.Descriptor instead. 
-func (*OperationProgress_Metric) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{26, 0} -} - -func (x *OperationProgress_Metric) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (m *OperationProgress_Metric) GetValue() isOperationProgress_Metric_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *OperationProgress_Metric) GetIntValue() int64 { - if x, ok := x.GetValue().(*OperationProgress_Metric_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (x *OperationProgress_Metric) GetDoubleValue() float64 { - if x, ok := x.GetValue().(*OperationProgress_Metric_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (x *OperationProgress_Metric) GetStringValue() string { - if x, ok := x.GetValue().(*OperationProgress_Metric_StringValue); ok { - return x.StringValue - } - return "" -} - -type isOperationProgress_Metric_Value interface { - isOperationProgress_Metric_Value() -} - -type OperationProgress_Metric_IntValue struct { - // For metrics with integer value. - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` -} - -type OperationProgress_Metric_DoubleValue struct { - // For metrics with floating point value. - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type OperationProgress_Metric_StringValue struct { - // For metrics with custom values (ratios, visual progress, etc.). - StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -func (*OperationProgress_Metric_IntValue) isOperationProgress_Metric_Value() {} - -func (*OperationProgress_Metric_DoubleValue) isOperationProgress_Metric_Value() {} - -func (*OperationProgress_Metric_StringValue) isOperationProgress_Metric_Value() {} - -// ReleaseChannelConfig exposes configuration for a release channel. -type ServerConfig_ReleaseChannelConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The release channel this configuration applies to. - Channel ReleaseChannel_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=google.container.v1.ReleaseChannel_Channel" json:"channel,omitempty"` - // The default version for newly created clusters on the channel. - DefaultVersion string `protobuf:"bytes,2,opt,name=default_version,json=defaultVersion,proto3" json:"default_version,omitempty"` - // List of valid versions for the channel. 
- ValidVersions []string `protobuf:"bytes,4,rep,name=valid_versions,json=validVersions,proto3" json:"valid_versions,omitempty"` -} - -func (x *ServerConfig_ReleaseChannelConfig) Reset() { - *x = ServerConfig_ReleaseChannelConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[100] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerConfig_ReleaseChannelConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerConfig_ReleaseChannelConfig) ProtoMessage() {} - -func (x *ServerConfig_ReleaseChannelConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[100] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerConfig_ReleaseChannelConfig.ProtoReflect.Descriptor instead. -func (*ServerConfig_ReleaseChannelConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{46, 0} -} - -func (x *ServerConfig_ReleaseChannelConfig) GetChannel() ReleaseChannel_Channel { - if x != nil { - return x.Channel - } - return ReleaseChannel_UNSPECIFIED -} - -func (x *ServerConfig_ReleaseChannelConfig) GetDefaultVersion() string { - if x != nil { - return x.DefaultVersion - } - return "" -} - -func (x *ServerConfig_ReleaseChannelConfig) GetValidVersions() []string { - if x != nil { - return x.ValidVersions - } - return nil -} - -// These upgrade settings control the level of parallelism and the level of -// disruption caused by an upgrade. -// -// maxUnavailable controls the number of nodes that can be simultaneously -// unavailable. -// -// maxSurge controls the number of additional nodes that can be added to the -// node pool temporarily for the time of the upgrade to increase the number of -// available nodes. -// -// (maxUnavailable + maxSurge) determines the level of parallelism (how many -// nodes are being upgraded at the same time). -// -// Note: upgrades inevitably introduce some disruption since workloads need to -// be moved from old nodes to new, upgraded ones. Even if maxUnavailable=0, -// this holds true. (Disruption stays within the limits of -// PodDisruptionBudget, if it is configured.) -// -// Consider a hypothetical node pool with 5 nodes having maxSurge=2, -// maxUnavailable=1. This means the upgrade process upgrades 3 nodes -// simultaneously. It creates 2 additional (upgraded) nodes, then it brings -// down 3 old (not yet upgraded) nodes at the same time. This ensures that -// there are always at least 4 nodes available. -type NodePool_UpgradeSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The maximum number of nodes that can be created beyond the current size - // of the node pool during the upgrade process. - MaxSurge int32 `protobuf:"varint,1,opt,name=max_surge,json=maxSurge,proto3" json:"max_surge,omitempty"` - // The maximum number of nodes that can be simultaneously unavailable during - // the upgrade process. A node is considered available if its status is - // Ready. 
- MaxUnavailable int32 `protobuf:"varint,2,opt,name=max_unavailable,json=maxUnavailable,proto3" json:"max_unavailable,omitempty"` -} - -func (x *NodePool_UpgradeSettings) Reset() { - *x = NodePool_UpgradeSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[101] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodePool_UpgradeSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodePool_UpgradeSettings) ProtoMessage() {} - -func (x *NodePool_UpgradeSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[101] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodePool_UpgradeSettings.ProtoReflect.Descriptor instead. -func (*NodePool_UpgradeSettings) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{51, 0} -} - -func (x *NodePool_UpgradeSettings) GetMaxSurge() int32 { - if x != nil { - return x.MaxSurge - } - return 0 -} - -func (x *NodePool_UpgradeSettings) GetMaxUnavailable() int32 { - if x != nil { - return x.MaxUnavailable - } - return 0 -} - -// Parameters for using BigQuery as the destination of resource usage export. -type ResourceUsageExportConfig_BigQueryDestination struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ID of a BigQuery Dataset. - DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"` -} - -func (x *ResourceUsageExportConfig_BigQueryDestination) Reset() { - *x = ResourceUsageExportConfig_BigQueryDestination{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[104] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceUsageExportConfig_BigQueryDestination) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceUsageExportConfig_BigQueryDestination) ProtoMessage() {} - -func (x *ResourceUsageExportConfig_BigQueryDestination) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[104] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceUsageExportConfig_BigQueryDestination.ProtoReflect.Descriptor instead. -func (*ResourceUsageExportConfig_BigQueryDestination) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{91, 0} -} - -func (x *ResourceUsageExportConfig_BigQueryDestination) GetDatasetId() string { - if x != nil { - return x.DatasetId - } - return "" -} - -// Parameters for controlling consumption metering. -type ResourceUsageExportConfig_ConsumptionMeteringConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether to enable consumption metering for this cluster. If enabled, a - // second BigQuery table will be created to hold resource consumption - // records. 
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *ResourceUsageExportConfig_ConsumptionMeteringConfig) Reset() { - *x = ResourceUsageExportConfig_ConsumptionMeteringConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[105] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceUsageExportConfig_ConsumptionMeteringConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceUsageExportConfig_ConsumptionMeteringConfig) ProtoMessage() {} - -func (x *ResourceUsageExportConfig_ConsumptionMeteringConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_container_v1_cluster_service_proto_msgTypes[105] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceUsageExportConfig_ConsumptionMeteringConfig.ProtoReflect.Descriptor instead. -func (*ResourceUsageExportConfig_ConsumptionMeteringConfig) Descriptor() ([]byte, []int) { - return file_google_container_v1_cluster_service_proto_rawDescGZIP(), []int{91, 1} -} - -func (x *ResourceUsageExportConfig_ConsumptionMeteringConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -var File_google_container_v1_cluster_service_proto protoreflect.FileDescriptor - -var file_google_container_v1_cluster_service_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x09, 0x0a, 0x0a, 0x4e, 0x6f, 0x64, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x6b, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x64, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x61, - 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 
0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x27, 0x0a, - 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, - 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, - 0x73, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x73, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x65, 0x6d, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x65, 0x6d, 0x70, 0x74, 0x69, - 0x62, 0x6c, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x70, 0x75, 0x50, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x65, 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, - 0x06, 0x74, 0x61, 0x69, 0x6e, 
0x74, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x74, - 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0d, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, - 0x5b, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, - 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, - 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x52, 0x13, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x18, - 0x73, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x73, 0x68, 0x69, - 0x65, 0x6c, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x29, 0x0a, 0x11, 0x62, 0x6f, 0x6f, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x6b, - 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x62, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x73, 0x6b, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0x3b, - 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, 0x01, 0x0a, 0x16, 0x53, 0x68, 0x69, 0x65, 0x6c, - 0x64, 0x65, 0x64, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, - 0x72, 0x65, 0x5f, 0x62, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x63, 0x75, 
0x72, 0x65, 0x42, 0x6f, 0x6f, 0x74, 0x12, - 0x3e, 0x0a, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, - 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, - 0x67, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x22, - 0x71, 0x0a, 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x3b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x23, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x56, 0x49, 0x53, 0x4f, 0x52, - 0x10, 0x01, 0x22, 0x84, 0x02, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x18, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x66, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x16, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5a, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x53, - 0x45, 0x52, 0x56, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x4e, - 0x59, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, - 0x18, 0x0a, 0x14, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x43, 0x5f, 0x52, 0x45, 0x53, 0x45, - 0x52, 0x56, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0xcd, 0x01, 0x0a, 0x09, 0x4e, 0x6f, - 0x64, 0x65, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x3d, 0x0a, 0x06, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x2e, - 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x52, 0x06, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x22, 0x59, - 0x0a, 0x06, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x45, 
0x46, 0x46, 0x45, - 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x10, - 0x01, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x5f, 0x53, - 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, - 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x45, 0x10, 0x03, 0x22, 0xba, 0x02, 0x0a, 0x0a, 0x4d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, - 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x68, 0x0a, 0x19, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x17, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x61, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x64, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x61, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x65, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x38, 0x0a, 0x18, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x16, 0x69, 0x73, 0x73, 0x75, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x94, 0x05, 0x0a, 0x0c, - 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x56, 0x0a, 0x13, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x48, 0x74, 0x74, 0x70, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x52, 0x11, 0x68, 0x74, 0x74, 0x70, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 
0x69, 0x6e, 0x67, 0x12, 0x6b, 0x0a, 0x1a, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x74, - 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, - 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x18, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, - 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, - 0x67, 0x12, 0x5f, 0x0a, 0x14, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, - 0x64, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, - 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6b, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, - 0x72, 0x64, 0x12, 0x5c, 0x0a, 0x15, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x4d, 0x0a, 0x10, 0x64, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x6e, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, - 0x64, 0x6e, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, - 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x2f, 0x0a, 0x11, 0x48, 0x74, 0x74, 0x70, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 
0x6e, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x22, 0x36, 0x0a, 0x18, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x74, 0x61, - 0x6c, 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x31, 0x0a, 0x13, 0x4b, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, - 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x31, - 0x0a, 0x13, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x44, 0x6e, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x42, 0x0a, - 0x26, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x22, 0xa8, 0x03, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x17, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x76, 0x34, - 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x21, 0x0a, - 0x0c, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x7a, 0x0a, 0x1b, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x18, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5c, 0x0a, 0x19, - 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x8a, 0x02, 0x0a, 0x0e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, - 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x12, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x22, 0x78, 0x0a, - 0x10, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x22, 0x0a, 0x1e, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, - 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, - 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, - 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, - 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x02, 0x22, 0x31, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xe9, 0x01, 0x0a, 0x1e, 0x4d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 
0x65, 0x64, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x5e, 0x0a, 0x0b, 0x63, 0x69, 0x64, 0x72, 0x5f, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x63, 0x69, 0x64, - 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x1a, 0x4d, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x64, 0x72, 0x5f, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x69, 0x64, - 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x26, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x41, 0x62, 0x61, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa4, - 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x47, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, - 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x22, 0x30, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, - 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x41, 0x4c, - 0x49, 0x43, 0x4f, 0x10, 0x01, 0x22, 0x2f, 0x0a, 0x13, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x8f, 0x05, 0x0a, 0x12, 0x49, 0x50, 0x41, 0x6c, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x24, 0x0a, - 0x0e, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65, 0x49, 0x70, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, - 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x12, 
0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x12, 0x28, 0x0a, 0x0e, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x70, 0x76, 0x34, 0x43, - 0x69, 0x64, 0x72, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, - 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x49, 0x70, 0x76, - 0x34, 0x43, 0x69, 0x64, 0x72, 0x12, 0x3f, 0x0a, 0x1c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x72, 0x61, 0x6e, - 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x12, 0x2f, 0x0a, 0x14, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, - 0x64, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x69, 0x70, - 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x49, 0x70, 0x76, - 0x34, 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2d, 0x0a, 0x13, 0x74, 0x70, - 0x75, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x70, 0x75, 0x49, 0x70, 0x76, 0x34, - 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, - 0x73, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x85, 0x1a, 0x0a, 0x07, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x12, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x0b, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0b, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, - 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x0a, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x41, 0x75, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, - 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, 0x0a, - 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, - 0x64, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x61, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x61, 0x64, - 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, - 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x3c, 0x0a, 0x0a, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x09, 0x6e, - 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x36, 0x0a, 
0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4b, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x12, 0x59, - 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x46, 0x69, 0x6e, 0x67, 0x65, - 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x61, 0x62, 0x61, 0x63, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x52, 0x0a, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x12, 0x49, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x59, 0x0a, 0x14, 0x69, 0x70, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x12, 0x69, 0x70, 0x41, 0x6c, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x7e, - 0x0a, 0x21, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1e, - 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x55, - 0x0a, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 
0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x11, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5b, 0x0a, 0x14, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, - 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x52, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, - 0x0e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x1b, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x78, 0x50, 0x6f, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x72, 0x61, 0x69, 0x6e, 0x74, 0x52, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4d, 0x61, - 0x78, 0x50, 0x6f, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x12, - 0x6f, 0x0a, 0x1c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x6e, 0x0a, 0x1b, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x68, - 0x65, 
0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x5f, 0x0a, 0x16, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x58, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, - 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x18, 0x76, - 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x61, 0x75, 0x74, 0x6f, - 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x64, 0x41, - 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x16, 0x76, 0x65, 0x72, 0x74, - 0x69, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x5f, 0x6e, - 0x6f, 0x64, 0x65, 0x73, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x0d, - 0x73, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4c, 0x0a, - 0x0f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x0e, 0x72, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x65, 0x0a, 0x18, 0x77, - 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x77, 0x6f, 0x72, 0x6b, - 0x6c, 0x6f, 0x61, 0x64, 0x49, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, - 0x64, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x66, 0x4c, 0x69, 0x6e, 0x6b, 0x12, - 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x67, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x16, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x34, 0x0a, 0x14, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x69, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x6c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x2d, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, - 0x64, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x6d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, - 0x6f, 0x64, 0x65, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x2c, 0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x69, 0x70, 0x76, 0x34, - 0x5f, 0x63, 0x69, 0x64, 0x72, 0x18, 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x12, 0x32, 0x0a, - 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, - 0x75, 0x72, 0x6c, 0x73, 0x18, 0x6f, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x11, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x55, 0x72, 0x6c, - 0x73, 0x12, 0x30, 0x0a, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x70, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 
0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x71, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x72, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x70, 0x75, 0x18, 0x73, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x70, 0x75, 0x12, - 0x2d, 0x0a, 0x13, 0x74, 0x70, 0x75, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, - 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x74, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x70, - 0x75, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x44, - 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x76, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x77, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x4f, - 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, - 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x4f, - 0x4e, 0x43, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, - 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x06, - 0x22, 0x85, 0x0f, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, - 0x64, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x55, 0x0a, 0x15, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x64, - 0x64, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 
0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x41, 0x64, 0x64, - 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, 0x0a, 0x14, 0x64, 0x65, 0x73, - 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, - 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, - 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x64, 0x65, - 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x49, - 0x6d, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x67, 0x0a, 0x1b, 0x64, 0x65, 0x73, 0x69, - 0x72, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x44, - 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x74, 0x0a, 0x20, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, - 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, - 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x5f, 0x73, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, - 0x73, 0x18, 0x30, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, - 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x14, 0x64, 0x65, 0x73, - 0x69, 0x72, 0x65, 0x64, 0x53, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, - 0x73, 0x12, 0x6b, 0x0a, 0x1d, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, - 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x52, 0x1a, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x2b, - 0x0a, 0x11, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 
0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8d, 0x01, 0x0a, 0x29, - 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x25, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x4d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x67, 0x0a, 0x1b, 0x64, - 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, - 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, - 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x19, 0x64, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, - 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x6a, 0x0a, 0x1c, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, - 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x42, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x36, 0x0a, 0x17, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x15, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x24, 0x64, 0x65, 0x73, 0x69, - 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x20, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, - 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x74, 0x0a, 0x20, 0x64, 0x65, 0x73, 0x69, - 0x72, 0x65, 0x64, 0x5f, 0x76, 
0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x64, - 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, - 0x6c, 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x52, - 0x1d, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, - 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x6e, - 0x0a, 0x1e, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x1b, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7e, - 0x0a, 0x24, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x61, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x72, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x69, 0x73, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x20, 0x64, 0x65, - 0x73, 0x69, 0x72, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x72, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x69, - 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, - 0x0a, 0x1b, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x5f, 0x73, 0x6e, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x1c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x53, 0x6e, 0x61, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x18, 0x64, 0x65, 0x73, - 0x69, 0x72, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6e, 0x61, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5b, 0x0a, 0x17, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x15, 0x64, 0x65, 0x73, - 0x69, 0x72, 0x65, 0x64, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x64, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x73, 0x69, 0x72, 
0x65, 0x64, 0x4d, 0x61, 0x73, 0x74, 0x65, - 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe3, 0x08, 0x0a, 0x09, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x66, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x1f, - 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x69, 0x6e, 0x6b, 0x12, - 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x6e, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x53, - 0x0a, 0x12, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, 
0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x6e, 0x6f, 0x64, 0x65, 0x70, 0x6f, 0x6f, 0x6c, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x52, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, - 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, - 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x03, - 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x22, 0xfd, - 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, - 0x0e, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, - 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x43, 0x4c, 0x55, 0x53, - 0x54, 0x45, 0x52, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x45, - 0x5f, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x50, 0x47, - 0x52, 0x41, 0x44, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x53, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, - 0x52, 0x45, 0x50, 0x41, 0x49, 0x52, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, 0x05, - 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, - 0x45, 0x52, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x4e, - 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x45, - 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x10, 0x08, - 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x4f, 0x4f, - 0x4c, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x09, 0x12, 0x15, - 0x0a, 0x11, 0x41, 0x55, 0x54, 0x4f, 0x5f, 0x52, 0x45, 0x50, 0x41, 0x49, 0x52, 0x5f, 0x4e, 0x4f, - 0x44, 0x45, 0x53, 0x10, 0x0a, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x55, 0x54, 0x4f, 0x5f, 0x55, 0x50, - 0x47, 0x52, 0x41, 0x44, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x53, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x53, 0x45, 0x54, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x10, 0x0c, 0x12, 0x13, 0x0a, - 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x41, 0x55, 0x54, 0x48, - 0x10, 0x0d, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x50, - 0x4f, 0x4f, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x0e, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, - 0x54, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, - 0x10, 0x0f, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, - 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x10, 0x22, 0x85, - 0x03, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, - 0x72, 
0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x47, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, - 0x1a, 0x93, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xa6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, - 0x85, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3f, - 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0xda, 0x03, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, - 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, - 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, - 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0c, 0x6e, - 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, - 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6e, 0x6f, - 0x64, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x65, 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, - 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x10, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x2e, - 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x22, 0x88, 0x02, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, - 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x24, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, - 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, - 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, - 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x6f, - 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x18, - 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 
0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc3, 0x01, 0x0a, 0x1b, 0x53, 0x65, 0x74, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xd7, - 0x01, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0d, 0x61, 0x64, 0x64, 0x6f, 0x6e, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x61, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x74, - 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, - 0x6c, 
0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, - 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, - 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, - 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x0e, 0x6d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe7, 0x02, 0x0a, 0x14, - 0x53, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, - 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, - 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3c, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, - 0x74, 0x68, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, - 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x45, - 0x54, 0x5f, 0x50, 0x41, 0x53, 
0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, - 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, - 0x44, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x45, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x4e, - 0x41, 0x4d, 0x45, 0x10, 0x03, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, - 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, - 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x68, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x75, 0x0a, 0x14, 0x4c, 0x69, - 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5a, 0x6f, 0x6e, 0x65, - 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x6a, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x16, - 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, - 0x65, 0x12, 0x25, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, 0x6f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7d, 0x0a, 0x16, - 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, - 0x67, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x67, 0x0a, 0x16, 0x47, - 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x88, 0x04, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, - 0x13, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 
0x12, 0x2c, 0x0a, - 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x4d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, 0x08, 0x63, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x1a, - 0xad, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, - 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xce, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, - 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, - 0x6e, 
0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x22, 0xaf, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, - 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x22, 0xac, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, - 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x86, 0x08, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4e, - 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x6c, - 0x69, 0x6e, 0x6b, 0x18, 0x64, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x66, 0x4c, - 0x69, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x65, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, - 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, - 0x75, 0x72, 0x6c, 0x73, 0x18, 0x66, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x3c, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x68, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x4a, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, - 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, - 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x70, - 0x6f, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x78, 0x50, 0x6f, - 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x52, 0x11, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x12, - 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x69, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x12, 0x70, 0x6f, 0x64, 0x5f, 0x69, 0x70, 0x76, - 0x34, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0f, 0x70, 0x6f, 0x64, 0x49, 0x70, 0x76, 0x34, 0x43, 0x69, 0x64, 0x72, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x2e, 0x55, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x75, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x57, 0x0a, 0x0f, - 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x75, 0x72, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x53, 0x75, 0x72, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, - 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x4f, 0x56, - 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, - 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x55, 0x4e, 0x4e, 0x49, - 0x4e, 0x47, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x43, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, - 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, - 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x06, 0x22, 0xa6, 0x01, 0x0a, 0x0e, 0x4e, 0x6f, - 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, - 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x70, 0x61, 0x69, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x70, 0x61, 0x69, 0x72, - 0x12, 0x50, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x6d, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x67, 
0x72, 0x61, 0x64, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x61, 0x75, 0x74, 0x6f, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x7e, 0x0a, 0x11, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3e, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x69, - 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x06, - 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x11, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, - 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x67, 0x0a, 0x18, 0x64, 0x61, 0x69, 0x6c, 0x79, - 0x5f, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x77, 0x69, 0x6e, - 0x64, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x61, 0x69, 0x6c, 0x79, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, - 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x16, 0x64, 0x61, 0x69, 0x6c, 0x79, 0x4d, - 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x12, 0x55, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x65, 0x63, 0x75, 0x72, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x69, 0x6e, - 0x67, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x78, 0x0a, 0x16, 0x6d, 0x61, 0x69, 0x6e, 0x74, - 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, - 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x2e, - 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x45, 0x78, 0x63, 0x6c, 0x75, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x15, 0x6d, 0x61, 0x69, 0x6e, - 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x45, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0x69, 0x0a, 0x1a, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, - 0x45, 
0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x7e, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, - 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6e, 0x0a, 0x13, 0x52, 0x65, 0x63, 0x75, 0x72, 0x72, - 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x37, 0x0a, - 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x06, - 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x4d, - 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x80, 0x02, 0x0a, 0x1c, - 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, - 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0c, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x02, 0x18, 0x01, 0x52, 
0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, - 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xd4, - 0x01, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, - 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x22, 0x0a, - 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x1e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, - 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, - 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x55, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 
0x6f, 0x6f, 0x6c, 0x52, 0x09, 0x6e, 0x6f, - 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x22, 0xe9, 0x02, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x40, - 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x61, 0x75, - 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, - 0x41, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0e, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x84, 0x01, - 0x0a, 0x23, 0x61, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, - 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x52, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x1a, 0x61, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x19, 0x61, 0x75, 0x74, 0x6f, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x88, 0x04, 0x0a, 0x20, 0x41, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x58, 0x0a, 0x10, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x2e, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x75, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 
0x73, 0x12, 0x43, - 0x0a, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x70, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, - 0x69, 0x6e, 0x43, 0x70, 0x75, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x20, 0x0a, - 0x0c, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x47, 0x62, 0x12, - 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x65, 0x0a, 0x18, - 0x73, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x73, 0x68, 0x69, - 0x65, 0x6c, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x29, 0x0a, 0x11, 0x62, 0x6f, 0x6f, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x6b, - 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x62, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x73, 0x6b, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x22, 0x68, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x22, 0xa5, 0x01, 0x0a, 0x13, 0x4e, 0x6f, 0x64, - 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x69, - 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x4e, 0x6f, 0x64, - 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 
0x61, 0x75, 0x74, 0x6f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, - 0x22, 0xe2, 0x02, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x67, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x11, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa7, 0x01, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, - 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, - 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0xb9, 0x01, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x72, 0x74, 0x49, 0x50, 0x52, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, - 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x19, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x50, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x6b, 0x0a, 0x11, 0x41, - 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x61, 0x63, 0x63, - 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x29, 0x0a, - 0x10, 0x61, 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x6c, 0x65, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, - 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, - 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0x40, 0x0a, 0x04, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x43, 0x45, 0x5f, 0x4d, - 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x4b, 0x45, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x10, 0x02, 0x22, 0xdb, 0x01, 0x0a, 0x17, - 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 
0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x4e, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x1b, 0x53, 0x65, - 0x74, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x17, 0x0a, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x5a, 0x0a, 0x12, 0x6d, 0x61, - 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x69, - 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x11, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x0f, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, - 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 
0x12, 0x10, 0x0a, - 0x0c, 0x47, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x4f, 0x43, 0x4b, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x12, - 0x1f, 0x0a, 0x1b, 0x47, 0x4b, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, - 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, - 0x12, 0x16, 0x0a, 0x12, 0x47, 0x43, 0x45, 0x5f, 0x51, 0x55, 0x4f, 0x54, 0x41, 0x5f, 0x45, 0x58, - 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, - 0x42, 0x59, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x4f, 0x52, 0x10, 0x04, 0x12, 0x17, 0x0a, - 0x13, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b, 0x4d, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x10, 0x07, 0x22, 0xe2, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x72, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x49, 0x6e, 0x74, 0x72, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, - 0x6e, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6e, - 0x61, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x53, 0x6e, 0x61, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x30, 0x0a, 0x16, 0x47, - 0x65, 0x74, 0x4f, 0x70, 0x65, 0x6e, 0x49, 0x44, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xdb, 0x02, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x6e, 0x49, 0x44, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, - 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, - 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x77, 0x6b, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x77, 0x6b, 0x73, 0x55, 0x72, 0x69, 0x12, 0x38, 0x0a, 0x18, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x73, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x54, 
0x79, 0x70, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x4f, - 0x0a, 0x25, 0x69, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x61, 0x6c, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x73, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x69, - 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6c, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6c, 0x61, 0x69, 0x6d, - 0x73, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x72, - 0x61, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x15, 0x47, - 0x65, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x97, 0x01, 0x0a, - 0x03, 0x4a, 0x77, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x6c, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x73, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x69, 0x64, 0x12, 0x0c, 0x0a, 0x01, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x01, 0x6e, 0x12, 0x0c, 0x0a, 0x01, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x01, 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x78, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x01, 0x78, 0x12, 0x0c, 0x0a, 0x01, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x01, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x76, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x63, 0x72, 0x76, 0x22, 0x46, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4a, 0x53, 0x4f, - 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2c, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4a, 0x77, 0x6b, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x97, - 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x12, 0x45, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, - 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x3e, 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x41, 0x50, 0x49, 0x44, 0x10, 0x01, 0x12, - 0x0b, 0x0a, 0x07, 0x52, 0x45, 
0x47, 0x55, 0x4c, 0x41, 0x52, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x22, 0x35, 0x0a, 0x19, 0x49, 0x6e, 0x74, 0x72, - 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, - 0x3e, 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x50, 0x6f, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x61, 0x69, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x64, 0x73, - 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0e, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x64, 0x73, 0x50, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x22, - 0x3d, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, - 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0xa8, - 0x01, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, - 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6b, 0x65, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x32, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x45, - 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x8a, 0x01, 0x0a, 0x1c, 0x4c, 0x69, - 0x73, 0x74, 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, - 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x90, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x55, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x73, 0x75, 0x62, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, - 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa0, 0x02, 0x0a, 0x1e, 0x55, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x69, - 0x70, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x69, 0x70, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x52, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0x67, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, - 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, - 0x55, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x5f, 0x55, 0x53, 0x45, - 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x49, 0x4e, - 0x5f, 0x55, 0x53, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x45, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x50, - 0x4f, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x49, 0x4e, 0x5f, 0x55, 0x53, 0x45, 0x5f, 0x4d, - 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x50, 0x4f, 0x44, 0x10, 0x04, 0x22, 0xfc, 0x01, 0x0a, - 0x10, 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x22, 0x0a, 0x0d, 0x69, - 0x70, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x69, 0x70, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x63, 0x0a, 0x13, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x70, 0x5f, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x11, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x49, 
0x70, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xcf, 0x03, 0x0a, 0x19, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, - 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x75, 0x0a, 0x14, 0x62, 0x69, 0x67, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x62, 0x69, 0x67, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x5f, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x67, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x65, 0x74, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x88, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x1a, 0x34, 0x0a, 0x13, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x73, - 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, 0x74, - 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x1a, 0x35, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x32, 0x0a, - 0x16, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x64, 0x41, 0x75, 0x74, 0x6f, - 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x22, 0x2f, 0x0a, 0x11, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6e, 0x61, 0x74, - 0x53, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x53, 0x68, 0x69, 0x65, 0x6c, 0x64, 0x65, 0x64, 0x4e, 0x6f, - 0x64, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x32, 0x85, 0x46, - 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x12, 0xe8, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x82, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x12, - 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x5a, 0x31, 0x12, - 0x2f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, - 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0xda, 0x41, 0x0f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, - 0x6e, 0x65, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xed, 0x01, 0x0a, 0x0a, - 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x22, 0x98, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x6e, 0x12, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x5a, 0x3e, 0x12, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, - 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0xda, 0x41, 0x1a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xf5, 0x01, 0x0a, 0x0d, - 0x43, 0x72, 0x65, 0x61, 0x74, 
0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x29, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x98, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x67, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x3a, - 0x01, 0x2a, 0x5a, 0x34, 0x22, 0x2f, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, - 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x17, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x89, 0x02, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0xac, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x74, 0x1a, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x5a, 0x41, 0x1a, 0x3c, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, - 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x21, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x86, 0x02, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, - 0x6f, 0x6c, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, - 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa7, - 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xa0, 0x01, 0x1a, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x5a, 0x61, 0x22, 0x5c, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, - 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, - 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xaa, 0x02, 0x0a, 0x16, 0x53, 0x65, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, - 0x69, 0x6e, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x64, - 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbb, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xb4, - 0x01, 0x22, 0x47, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, - 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x41, - 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x5a, 0x66, 0x22, - 0x61, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, - 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x6f, - 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, - 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x12, 0xb7, 0x02, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, - 0x67, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 
0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd2, 0x01, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x87, 0x01, 0x22, 0x37, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x3a, 0x01, 0x2a, - 0x5a, 0x49, 0x22, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, - 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, - 0x2f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x2a, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xda, 0x41, 0x14, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0xc9, 0x02, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x8d, 0x01, 0x22, 0x3a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, - 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x01, - 0x2a, 0xda, 0x41, 0x2d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, - 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0xda, 0x41, 0x17, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xad, 0x02, 0x0a, 0x0f, - 0x53, 0x65, 0x74, 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x2b, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xcc, 0x01, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x85, 0x01, 0x22, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x41, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x3a, 0x01, - 0x2a, 0x5a, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, - 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x7d, 0x2f, 0x61, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x28, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x61, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x64, - 0x64, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa8, 0x02, 0x0a, 0x0c, - 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x01, 0x88, 0x02, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x8b, 0x01, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, - 0x2a, 0x5a, 0x4b, 0x22, 0x46, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, - 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, - 0x24, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, - 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0xac, 0x02, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0xd1, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x88, 0x01, 0x22, 0x39, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x01, 0x2a, 0x5a, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x31, - 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, - 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x29, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, - 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x2c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0xda, - 0x41, 0x13, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xf4, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, - 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x97, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x90, 0x01, 0x22, 0x3a, 0x2f, 0x76, - 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x3a, 0x01, 0x2a, 0x5a, 0x4f, 0x22, 0x4a, 0x2f, - 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, - 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x3a, 0x01, 0x2a, 0x12, 0xf5, 0x01, 0x0a, - 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x98, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x6e, 0x2a, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, - 0x5a, 0x3e, 0x2a, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, - 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, - 0xda, 0x41, 0x1a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, - 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xe8, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x7d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x65, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5a, 0x33, 0x12, 0x31, 0x2f, 0x76, 0x31, 0x2f, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, - 0x65, 0x7d, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x0f, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x12, - 0xfb, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x74, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 
0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x7d, 0x5a, 0x42, 0x12, 0x40, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, - 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x7d, 0xda, 0x41, 0x1c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8e, 0x02, - 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb5, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x88, 0x01, - 0x22, 0x35, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, - 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x7b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x63, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1c, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xea, - 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x86, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x67, 0x12, 0x2e, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5a, 0x35, 0x12, 0x33, 0x2f, - 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 
0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, - 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0xda, 0x41, 0x0f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, - 0x7a, 0x6f, 0x6e, 0x65, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x0e, - 0x47, 0x65, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, - 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x12, - 0x33, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6a, 0x77, 0x6b, 0x73, 0x12, 0x9a, 0x02, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, - 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, - 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb1, 0x01, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x84, 0x01, 0x12, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, - 0x73, 0x5a, 0x48, 0x12, 0x46, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, - 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0xda, 0x41, 0x1a, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0xa3, 0x02, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, - 0x6c, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0xcb, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x93, 0x01, 0x12, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, - 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x5a, 0x57, 0x12, - 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, - 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x6f, - 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, - 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x7d, 0xda, 0x41, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x2c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xaa, 0x02, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xcb, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8a, 0x01, - 0x22, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x4b, 0x22, - 0x46, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, - 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x6f, - 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, - 0x6c, 0xda, 0x41, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0xaa, 0x02, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, - 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x50, 
0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xcb, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x93, 0x01, 0x2a, 0x38, 0x2f, - 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x5a, 0x57, 0x2a, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, - 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, - 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x7d, - 0xda, 0x41, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, - 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0xd4, 0x02, 0x0a, 0x17, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4e, 0x6f, - 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x33, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4e, 0x6f, 0x64, 0x65, - 0x50, 0x6f, 0x6f, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xab, 0x01, 0x22, 0x41, 0x2f, 0x76, - 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, - 0x6f, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x3a, - 0x01, 0x2a, 0x5a, 0x63, 0x22, 0x5e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, - 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x72, 0x6f, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 
0x6c, 0x5f, 0x69, - 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa9, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, - 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xb5, 0x01, 0x22, - 0x46, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x64, - 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x01, 0x2a, 0x5a, 0x68, 0x22, 0x63, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, - 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, - 0x69, 0x64, 0x7d, 0x2f, 0x73, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x3a, 0x01, 0x2a, 0x12, 0xf1, 0x01, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x9c, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x95, 0x01, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x50, 0x22, 0x4b, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, - 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0xa5, 0x02, 0x0a, 0x0d, 0x53, 0x65, 0x74, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 
0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8d, 0x01, 0x22, - 0x3a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, - 0x74, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, - 0x22, 0x47, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, - 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x41, 0x62, 0x61, 0x63, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x22, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0xda, 0x41, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0xa0, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x49, 0x50, 0x52, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x49, 0x50, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0xbf, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x94, 0x01, 0x22, 0x3c, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x49, - 0x70, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0x5a, 0x51, 0x22, 0x4c, - 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, - 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x49, 0x70, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x1a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, - 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0xac, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x49, 0x50, 0x52, 0x6f, 0x74, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x50, 0x52, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc5, 0x01, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x9a, 0x01, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x70, 0x52, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0x5a, 0x54, 0x22, 0x4f, 0x2f, 0x76, 0x31, 0x2f, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, - 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x49, 0x70, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, - 0x1a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, - 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x91, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, - 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xa9, 0x01, 0x22, 0x40, 0x2f, - 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, - 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x3a, - 0x01, 0x2a, 0x5a, 0x62, 0x22, 0x5d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x7d, 0x2f, - 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x7d, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x73, 0x65, 0x74, 0x53, - 0x69, 0x7a, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xc2, 0x02, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x4e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x12, 0x2c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xdf, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x96, 0x01, 0x22, 0x3d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, - 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x29, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0xda, 0x41, 0x13, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xda, 0x02, 0x0a, 0x14, - 0x53, 0x65, 0x74, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4d, 0x61, - 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xef, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9e, 0x01, - 0x22, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, - 0x65, 0x74, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x56, 0x22, 0x51, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x69, 0x64, 0x7d, 0x2f, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x2f, 0x7b, 0x7a, 0x6f, 0x6e, 0x65, 0x7d, - 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, - 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 
0x2a, 0xda, 0x41, - 0x2d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x7a, 0x6f, 0x6e, 0x65, - 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6d, 0x61, 0x69, 0x6e, - 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0xda, 0x41, - 0x17, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, - 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xbc, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, - 0x74, 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x55, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x36, 0x12, 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x75, 0x62, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x1a, 0x4c, 0xca, 0x41, 0x18, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, - 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0xc5, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x42, 0x13, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0xaa, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, - 0x56, 0x31, 0xca, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, - 0x1c, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_container_v1_cluster_service_proto_rawDescOnce sync.Once - file_google_container_v1_cluster_service_proto_rawDescData = 
file_google_container_v1_cluster_service_proto_rawDesc -) - -func file_google_container_v1_cluster_service_proto_rawDescGZIP() []byte { - file_google_container_v1_cluster_service_proto_rawDescOnce.Do(func() { - file_google_container_v1_cluster_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_container_v1_cluster_service_proto_rawDescData) - }) - return file_google_container_v1_cluster_service_proto_rawDescData -} - -var file_google_container_v1_cluster_service_proto_enumTypes = make([]protoimpl.EnumInfo, 15) -var file_google_container_v1_cluster_service_proto_msgTypes = make([]protoimpl.MessageInfo, 106) -var file_google_container_v1_cluster_service_proto_goTypes = []interface{}{ - (SandboxConfig_Type)(0), // 0: google.container.v1.SandboxConfig.Type - (ReservationAffinity_Type)(0), // 1: google.container.v1.ReservationAffinity.Type - (NodeTaint_Effect)(0), // 2: google.container.v1.NodeTaint.Effect - (CloudRunConfig_LoadBalancerType)(0), // 3: google.container.v1.CloudRunConfig.LoadBalancerType - (NetworkPolicy_Provider)(0), // 4: google.container.v1.NetworkPolicy.Provider - (Cluster_Status)(0), // 5: google.container.v1.Cluster.Status - (Operation_Status)(0), // 6: google.container.v1.Operation.Status - (Operation_Type)(0), // 7: google.container.v1.Operation.Type - (SetMasterAuthRequest_Action)(0), // 8: google.container.v1.SetMasterAuthRequest.Action - (NodePool_Status)(0), // 9: google.container.v1.NodePool.Status - (WorkloadMetadataConfig_Mode)(0), // 10: google.container.v1.WorkloadMetadataConfig.Mode - (StatusCondition_Code)(0), // 11: google.container.v1.StatusCondition.Code - (ReleaseChannel_Channel)(0), // 12: google.container.v1.ReleaseChannel.Channel - (DatabaseEncryption_State)(0), // 13: google.container.v1.DatabaseEncryption.State - (UsableSubnetworkSecondaryRange_Status)(0), // 14: google.container.v1.UsableSubnetworkSecondaryRange.Status - (*NodeConfig)(nil), // 15: google.container.v1.NodeConfig - (*ShieldedInstanceConfig)(nil), // 16: google.container.v1.ShieldedInstanceConfig - (*SandboxConfig)(nil), // 17: google.container.v1.SandboxConfig - (*ReservationAffinity)(nil), // 18: google.container.v1.ReservationAffinity - (*NodeTaint)(nil), // 19: google.container.v1.NodeTaint - (*MasterAuth)(nil), // 20: google.container.v1.MasterAuth - (*ClientCertificateConfig)(nil), // 21: google.container.v1.ClientCertificateConfig - (*AddonsConfig)(nil), // 22: google.container.v1.AddonsConfig - (*HttpLoadBalancing)(nil), // 23: google.container.v1.HttpLoadBalancing - (*HorizontalPodAutoscaling)(nil), // 24: google.container.v1.HorizontalPodAutoscaling - (*KubernetesDashboard)(nil), // 25: google.container.v1.KubernetesDashboard - (*NetworkPolicyConfig)(nil), // 26: google.container.v1.NetworkPolicyConfig - (*DnsCacheConfig)(nil), // 27: google.container.v1.DnsCacheConfig - (*PrivateClusterMasterGlobalAccessConfig)(nil), // 28: google.container.v1.PrivateClusterMasterGlobalAccessConfig - (*PrivateClusterConfig)(nil), // 29: google.container.v1.PrivateClusterConfig - (*AuthenticatorGroupsConfig)(nil), // 30: google.container.v1.AuthenticatorGroupsConfig - (*CloudRunConfig)(nil), // 31: google.container.v1.CloudRunConfig - (*ConfigConnectorConfig)(nil), // 32: google.container.v1.ConfigConnectorConfig - (*MasterAuthorizedNetworksConfig)(nil), // 33: google.container.v1.MasterAuthorizedNetworksConfig - (*LegacyAbac)(nil), // 34: google.container.v1.LegacyAbac - (*NetworkPolicy)(nil), // 35: google.container.v1.NetworkPolicy - (*BinaryAuthorization)(nil), // 36: 
google.container.v1.BinaryAuthorization - (*IPAllocationPolicy)(nil), // 37: google.container.v1.IPAllocationPolicy - (*Cluster)(nil), // 38: google.container.v1.Cluster - (*ClusterUpdate)(nil), // 39: google.container.v1.ClusterUpdate - (*Operation)(nil), // 40: google.container.v1.Operation - (*OperationProgress)(nil), // 41: google.container.v1.OperationProgress - (*CreateClusterRequest)(nil), // 42: google.container.v1.CreateClusterRequest - (*GetClusterRequest)(nil), // 43: google.container.v1.GetClusterRequest - (*UpdateClusterRequest)(nil), // 44: google.container.v1.UpdateClusterRequest - (*UpdateNodePoolRequest)(nil), // 45: google.container.v1.UpdateNodePoolRequest - (*SetNodePoolAutoscalingRequest)(nil), // 46: google.container.v1.SetNodePoolAutoscalingRequest - (*SetLoggingServiceRequest)(nil), // 47: google.container.v1.SetLoggingServiceRequest - (*SetMonitoringServiceRequest)(nil), // 48: google.container.v1.SetMonitoringServiceRequest - (*SetAddonsConfigRequest)(nil), // 49: google.container.v1.SetAddonsConfigRequest - (*SetLocationsRequest)(nil), // 50: google.container.v1.SetLocationsRequest - (*UpdateMasterRequest)(nil), // 51: google.container.v1.UpdateMasterRequest - (*SetMasterAuthRequest)(nil), // 52: google.container.v1.SetMasterAuthRequest - (*DeleteClusterRequest)(nil), // 53: google.container.v1.DeleteClusterRequest - (*ListClustersRequest)(nil), // 54: google.container.v1.ListClustersRequest - (*ListClustersResponse)(nil), // 55: google.container.v1.ListClustersResponse - (*GetOperationRequest)(nil), // 56: google.container.v1.GetOperationRequest - (*ListOperationsRequest)(nil), // 57: google.container.v1.ListOperationsRequest - (*CancelOperationRequest)(nil), // 58: google.container.v1.CancelOperationRequest - (*ListOperationsResponse)(nil), // 59: google.container.v1.ListOperationsResponse - (*GetServerConfigRequest)(nil), // 60: google.container.v1.GetServerConfigRequest - (*ServerConfig)(nil), // 61: google.container.v1.ServerConfig - (*CreateNodePoolRequest)(nil), // 62: google.container.v1.CreateNodePoolRequest - (*DeleteNodePoolRequest)(nil), // 63: google.container.v1.DeleteNodePoolRequest - (*ListNodePoolsRequest)(nil), // 64: google.container.v1.ListNodePoolsRequest - (*GetNodePoolRequest)(nil), // 65: google.container.v1.GetNodePoolRequest - (*NodePool)(nil), // 66: google.container.v1.NodePool - (*NodeManagement)(nil), // 67: google.container.v1.NodeManagement - (*AutoUpgradeOptions)(nil), // 68: google.container.v1.AutoUpgradeOptions - (*MaintenancePolicy)(nil), // 69: google.container.v1.MaintenancePolicy - (*MaintenanceWindow)(nil), // 70: google.container.v1.MaintenanceWindow - (*TimeWindow)(nil), // 71: google.container.v1.TimeWindow - (*RecurringTimeWindow)(nil), // 72: google.container.v1.RecurringTimeWindow - (*DailyMaintenanceWindow)(nil), // 73: google.container.v1.DailyMaintenanceWindow - (*SetNodePoolManagementRequest)(nil), // 74: google.container.v1.SetNodePoolManagementRequest - (*SetNodePoolSizeRequest)(nil), // 75: google.container.v1.SetNodePoolSizeRequest - (*RollbackNodePoolUpgradeRequest)(nil), // 76: google.container.v1.RollbackNodePoolUpgradeRequest - (*ListNodePoolsResponse)(nil), // 77: google.container.v1.ListNodePoolsResponse - (*ClusterAutoscaling)(nil), // 78: google.container.v1.ClusterAutoscaling - (*AutoprovisioningNodePoolDefaults)(nil), // 79: google.container.v1.AutoprovisioningNodePoolDefaults - (*ResourceLimit)(nil), // 80: google.container.v1.ResourceLimit - (*NodePoolAutoscaling)(nil), // 81: 
google.container.v1.NodePoolAutoscaling - (*SetLabelsRequest)(nil), // 82: google.container.v1.SetLabelsRequest - (*SetLegacyAbacRequest)(nil), // 83: google.container.v1.SetLegacyAbacRequest - (*StartIPRotationRequest)(nil), // 84: google.container.v1.StartIPRotationRequest - (*CompleteIPRotationRequest)(nil), // 85: google.container.v1.CompleteIPRotationRequest - (*AcceleratorConfig)(nil), // 86: google.container.v1.AcceleratorConfig - (*WorkloadMetadataConfig)(nil), // 87: google.container.v1.WorkloadMetadataConfig - (*SetNetworkPolicyRequest)(nil), // 88: google.container.v1.SetNetworkPolicyRequest - (*SetMaintenancePolicyRequest)(nil), // 89: google.container.v1.SetMaintenancePolicyRequest - (*StatusCondition)(nil), // 90: google.container.v1.StatusCondition - (*NetworkConfig)(nil), // 91: google.container.v1.NetworkConfig - (*GetOpenIDConfigRequest)(nil), // 92: google.container.v1.GetOpenIDConfigRequest - (*GetOpenIDConfigResponse)(nil), // 93: google.container.v1.GetOpenIDConfigResponse - (*GetJSONWebKeysRequest)(nil), // 94: google.container.v1.GetJSONWebKeysRequest - (*Jwk)(nil), // 95: google.container.v1.Jwk - (*GetJSONWebKeysResponse)(nil), // 96: google.container.v1.GetJSONWebKeysResponse - (*ReleaseChannel)(nil), // 97: google.container.v1.ReleaseChannel - (*IntraNodeVisibilityConfig)(nil), // 98: google.container.v1.IntraNodeVisibilityConfig - (*MaxPodsConstraint)(nil), // 99: google.container.v1.MaxPodsConstraint - (*WorkloadIdentityConfig)(nil), // 100: google.container.v1.WorkloadIdentityConfig - (*DatabaseEncryption)(nil), // 101: google.container.v1.DatabaseEncryption - (*ListUsableSubnetworksRequest)(nil), // 102: google.container.v1.ListUsableSubnetworksRequest - (*ListUsableSubnetworksResponse)(nil), // 103: google.container.v1.ListUsableSubnetworksResponse - (*UsableSubnetworkSecondaryRange)(nil), // 104: google.container.v1.UsableSubnetworkSecondaryRange - (*UsableSubnetwork)(nil), // 105: google.container.v1.UsableSubnetwork - (*ResourceUsageExportConfig)(nil), // 106: google.container.v1.ResourceUsageExportConfig - (*VerticalPodAutoscaling)(nil), // 107: google.container.v1.VerticalPodAutoscaling - (*DefaultSnatStatus)(nil), // 108: google.container.v1.DefaultSnatStatus - (*ShieldedNodes)(nil), // 109: google.container.v1.ShieldedNodes - nil, // 110: google.container.v1.NodeConfig.MetadataEntry - nil, // 111: google.container.v1.NodeConfig.LabelsEntry - (*MasterAuthorizedNetworksConfig_CidrBlock)(nil), // 112: google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock - nil, // 113: google.container.v1.Cluster.ResourceLabelsEntry - (*OperationProgress_Metric)(nil), // 114: google.container.v1.OperationProgress.Metric - (*ServerConfig_ReleaseChannelConfig)(nil), // 115: google.container.v1.ServerConfig.ReleaseChannelConfig - (*NodePool_UpgradeSettings)(nil), // 116: google.container.v1.NodePool.UpgradeSettings - nil, // 117: google.container.v1.MaintenanceWindow.MaintenanceExclusionsEntry - nil, // 118: google.container.v1.SetLabelsRequest.ResourceLabelsEntry - (*ResourceUsageExportConfig_BigQueryDestination)(nil), // 119: google.container.v1.ResourceUsageExportConfig.BigQueryDestination - (*ResourceUsageExportConfig_ConsumptionMeteringConfig)(nil), // 120: google.container.v1.ResourceUsageExportConfig.ConsumptionMeteringConfig - (*timestamppb.Timestamp)(nil), // 121: google.protobuf.Timestamp - (*emptypb.Empty)(nil), // 122: google.protobuf.Empty -} -var file_google_container_v1_cluster_service_proto_depIdxs = []int32{ - 110, // 0: 
google.container.v1.NodeConfig.metadata:type_name -> google.container.v1.NodeConfig.MetadataEntry - 111, // 1: google.container.v1.NodeConfig.labels:type_name -> google.container.v1.NodeConfig.LabelsEntry - 86, // 2: google.container.v1.NodeConfig.accelerators:type_name -> google.container.v1.AcceleratorConfig - 87, // 3: google.container.v1.NodeConfig.workload_metadata_config:type_name -> google.container.v1.WorkloadMetadataConfig - 19, // 4: google.container.v1.NodeConfig.taints:type_name -> google.container.v1.NodeTaint - 17, // 5: google.container.v1.NodeConfig.sandbox_config:type_name -> google.container.v1.SandboxConfig - 18, // 6: google.container.v1.NodeConfig.reservation_affinity:type_name -> google.container.v1.ReservationAffinity - 16, // 7: google.container.v1.NodeConfig.shielded_instance_config:type_name -> google.container.v1.ShieldedInstanceConfig - 0, // 8: google.container.v1.SandboxConfig.type:type_name -> google.container.v1.SandboxConfig.Type - 1, // 9: google.container.v1.ReservationAffinity.consume_reservation_type:type_name -> google.container.v1.ReservationAffinity.Type - 2, // 10: google.container.v1.NodeTaint.effect:type_name -> google.container.v1.NodeTaint.Effect - 21, // 11: google.container.v1.MasterAuth.client_certificate_config:type_name -> google.container.v1.ClientCertificateConfig - 23, // 12: google.container.v1.AddonsConfig.http_load_balancing:type_name -> google.container.v1.HttpLoadBalancing - 24, // 13: google.container.v1.AddonsConfig.horizontal_pod_autoscaling:type_name -> google.container.v1.HorizontalPodAutoscaling - 25, // 14: google.container.v1.AddonsConfig.kubernetes_dashboard:type_name -> google.container.v1.KubernetesDashboard - 26, // 15: google.container.v1.AddonsConfig.network_policy_config:type_name -> google.container.v1.NetworkPolicyConfig - 31, // 16: google.container.v1.AddonsConfig.cloud_run_config:type_name -> google.container.v1.CloudRunConfig - 27, // 17: google.container.v1.AddonsConfig.dns_cache_config:type_name -> google.container.v1.DnsCacheConfig - 32, // 18: google.container.v1.AddonsConfig.config_connector_config:type_name -> google.container.v1.ConfigConnectorConfig - 28, // 19: google.container.v1.PrivateClusterConfig.master_global_access_config:type_name -> google.container.v1.PrivateClusterMasterGlobalAccessConfig - 3, // 20: google.container.v1.CloudRunConfig.load_balancer_type:type_name -> google.container.v1.CloudRunConfig.LoadBalancerType - 112, // 21: google.container.v1.MasterAuthorizedNetworksConfig.cidr_blocks:type_name -> google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock - 4, // 22: google.container.v1.NetworkPolicy.provider:type_name -> google.container.v1.NetworkPolicy.Provider - 15, // 23: google.container.v1.Cluster.node_config:type_name -> google.container.v1.NodeConfig - 20, // 24: google.container.v1.Cluster.master_auth:type_name -> google.container.v1.MasterAuth - 22, // 25: google.container.v1.Cluster.addons_config:type_name -> google.container.v1.AddonsConfig - 66, // 26: google.container.v1.Cluster.node_pools:type_name -> google.container.v1.NodePool - 113, // 27: google.container.v1.Cluster.resource_labels:type_name -> google.container.v1.Cluster.ResourceLabelsEntry - 34, // 28: google.container.v1.Cluster.legacy_abac:type_name -> google.container.v1.LegacyAbac - 35, // 29: google.container.v1.Cluster.network_policy:type_name -> google.container.v1.NetworkPolicy - 37, // 30: google.container.v1.Cluster.ip_allocation_policy:type_name -> google.container.v1.IPAllocationPolicy - 33, // 31: 
google.container.v1.Cluster.master_authorized_networks_config:type_name -> google.container.v1.MasterAuthorizedNetworksConfig - 69, // 32: google.container.v1.Cluster.maintenance_policy:type_name -> google.container.v1.MaintenancePolicy - 36, // 33: google.container.v1.Cluster.binary_authorization:type_name -> google.container.v1.BinaryAuthorization - 78, // 34: google.container.v1.Cluster.autoscaling:type_name -> google.container.v1.ClusterAutoscaling - 91, // 35: google.container.v1.Cluster.network_config:type_name -> google.container.v1.NetworkConfig - 99, // 36: google.container.v1.Cluster.default_max_pods_constraint:type_name -> google.container.v1.MaxPodsConstraint - 106, // 37: google.container.v1.Cluster.resource_usage_export_config:type_name -> google.container.v1.ResourceUsageExportConfig - 30, // 38: google.container.v1.Cluster.authenticator_groups_config:type_name -> google.container.v1.AuthenticatorGroupsConfig - 29, // 39: google.container.v1.Cluster.private_cluster_config:type_name -> google.container.v1.PrivateClusterConfig - 101, // 40: google.container.v1.Cluster.database_encryption:type_name -> google.container.v1.DatabaseEncryption - 107, // 41: google.container.v1.Cluster.vertical_pod_autoscaling:type_name -> google.container.v1.VerticalPodAutoscaling - 109, // 42: google.container.v1.Cluster.shielded_nodes:type_name -> google.container.v1.ShieldedNodes - 97, // 43: google.container.v1.Cluster.release_channel:type_name -> google.container.v1.ReleaseChannel - 100, // 44: google.container.v1.Cluster.workload_identity_config:type_name -> google.container.v1.WorkloadIdentityConfig - 5, // 45: google.container.v1.Cluster.status:type_name -> google.container.v1.Cluster.Status - 90, // 46: google.container.v1.Cluster.conditions:type_name -> google.container.v1.StatusCondition - 22, // 47: google.container.v1.ClusterUpdate.desired_addons_config:type_name -> google.container.v1.AddonsConfig - 101, // 48: google.container.v1.ClusterUpdate.desired_database_encryption:type_name -> google.container.v1.DatabaseEncryption - 100, // 49: google.container.v1.ClusterUpdate.desired_workload_identity_config:type_name -> google.container.v1.WorkloadIdentityConfig - 109, // 50: google.container.v1.ClusterUpdate.desired_shielded_nodes:type_name -> google.container.v1.ShieldedNodes - 81, // 51: google.container.v1.ClusterUpdate.desired_node_pool_autoscaling:type_name -> google.container.v1.NodePoolAutoscaling - 33, // 52: google.container.v1.ClusterUpdate.desired_master_authorized_networks_config:type_name -> google.container.v1.MasterAuthorizedNetworksConfig - 78, // 53: google.container.v1.ClusterUpdate.desired_cluster_autoscaling:type_name -> google.container.v1.ClusterAutoscaling - 36, // 54: google.container.v1.ClusterUpdate.desired_binary_authorization:type_name -> google.container.v1.BinaryAuthorization - 106, // 55: google.container.v1.ClusterUpdate.desired_resource_usage_export_config:type_name -> google.container.v1.ResourceUsageExportConfig - 107, // 56: google.container.v1.ClusterUpdate.desired_vertical_pod_autoscaling:type_name -> google.container.v1.VerticalPodAutoscaling - 29, // 57: google.container.v1.ClusterUpdate.desired_private_cluster_config:type_name -> google.container.v1.PrivateClusterConfig - 98, // 58: google.container.v1.ClusterUpdate.desired_intra_node_visibility_config:type_name -> google.container.v1.IntraNodeVisibilityConfig - 108, // 59: google.container.v1.ClusterUpdate.desired_default_snat_status:type_name -> google.container.v1.DefaultSnatStatus - 97, // 60: 
google.container.v1.ClusterUpdate.desired_release_channel:type_name -> google.container.v1.ReleaseChannel - 7, // 61: google.container.v1.Operation.operation_type:type_name -> google.container.v1.Operation.Type - 6, // 62: google.container.v1.Operation.status:type_name -> google.container.v1.Operation.Status - 41, // 63: google.container.v1.Operation.progress:type_name -> google.container.v1.OperationProgress - 90, // 64: google.container.v1.Operation.cluster_conditions:type_name -> google.container.v1.StatusCondition - 90, // 65: google.container.v1.Operation.nodepool_conditions:type_name -> google.container.v1.StatusCondition - 6, // 66: google.container.v1.OperationProgress.status:type_name -> google.container.v1.Operation.Status - 114, // 67: google.container.v1.OperationProgress.metrics:type_name -> google.container.v1.OperationProgress.Metric - 41, // 68: google.container.v1.OperationProgress.stages:type_name -> google.container.v1.OperationProgress - 38, // 69: google.container.v1.CreateClusterRequest.cluster:type_name -> google.container.v1.Cluster - 39, // 70: google.container.v1.UpdateClusterRequest.update:type_name -> google.container.v1.ClusterUpdate - 87, // 71: google.container.v1.UpdateNodePoolRequest.workload_metadata_config:type_name -> google.container.v1.WorkloadMetadataConfig - 116, // 72: google.container.v1.UpdateNodePoolRequest.upgrade_settings:type_name -> google.container.v1.NodePool.UpgradeSettings - 81, // 73: google.container.v1.SetNodePoolAutoscalingRequest.autoscaling:type_name -> google.container.v1.NodePoolAutoscaling - 22, // 74: google.container.v1.SetAddonsConfigRequest.addons_config:type_name -> google.container.v1.AddonsConfig - 8, // 75: google.container.v1.SetMasterAuthRequest.action:type_name -> google.container.v1.SetMasterAuthRequest.Action - 20, // 76: google.container.v1.SetMasterAuthRequest.update:type_name -> google.container.v1.MasterAuth - 38, // 77: google.container.v1.ListClustersResponse.clusters:type_name -> google.container.v1.Cluster - 40, // 78: google.container.v1.ListOperationsResponse.operations:type_name -> google.container.v1.Operation - 115, // 79: google.container.v1.ServerConfig.channels:type_name -> google.container.v1.ServerConfig.ReleaseChannelConfig - 66, // 80: google.container.v1.CreateNodePoolRequest.node_pool:type_name -> google.container.v1.NodePool - 15, // 81: google.container.v1.NodePool.config:type_name -> google.container.v1.NodeConfig - 9, // 82: google.container.v1.NodePool.status:type_name -> google.container.v1.NodePool.Status - 81, // 83: google.container.v1.NodePool.autoscaling:type_name -> google.container.v1.NodePoolAutoscaling - 67, // 84: google.container.v1.NodePool.management:type_name -> google.container.v1.NodeManagement - 99, // 85: google.container.v1.NodePool.max_pods_constraint:type_name -> google.container.v1.MaxPodsConstraint - 90, // 86: google.container.v1.NodePool.conditions:type_name -> google.container.v1.StatusCondition - 116, // 87: google.container.v1.NodePool.upgrade_settings:type_name -> google.container.v1.NodePool.UpgradeSettings - 68, // 88: google.container.v1.NodeManagement.upgrade_options:type_name -> google.container.v1.AutoUpgradeOptions - 70, // 89: google.container.v1.MaintenancePolicy.window:type_name -> google.container.v1.MaintenanceWindow - 73, // 90: google.container.v1.MaintenanceWindow.daily_maintenance_window:type_name -> google.container.v1.DailyMaintenanceWindow - 72, // 91: google.container.v1.MaintenanceWindow.recurring_window:type_name -> 
google.container.v1.RecurringTimeWindow - 117, // 92: google.container.v1.MaintenanceWindow.maintenance_exclusions:type_name -> google.container.v1.MaintenanceWindow.MaintenanceExclusionsEntry - 121, // 93: google.container.v1.TimeWindow.start_time:type_name -> google.protobuf.Timestamp - 121, // 94: google.container.v1.TimeWindow.end_time:type_name -> google.protobuf.Timestamp - 71, // 95: google.container.v1.RecurringTimeWindow.window:type_name -> google.container.v1.TimeWindow - 67, // 96: google.container.v1.SetNodePoolManagementRequest.management:type_name -> google.container.v1.NodeManagement - 66, // 97: google.container.v1.ListNodePoolsResponse.node_pools:type_name -> google.container.v1.NodePool - 80, // 98: google.container.v1.ClusterAutoscaling.resource_limits:type_name -> google.container.v1.ResourceLimit - 79, // 99: google.container.v1.ClusterAutoscaling.autoprovisioning_node_pool_defaults:type_name -> google.container.v1.AutoprovisioningNodePoolDefaults - 116, // 100: google.container.v1.AutoprovisioningNodePoolDefaults.upgrade_settings:type_name -> google.container.v1.NodePool.UpgradeSettings - 67, // 101: google.container.v1.AutoprovisioningNodePoolDefaults.management:type_name -> google.container.v1.NodeManagement - 16, // 102: google.container.v1.AutoprovisioningNodePoolDefaults.shielded_instance_config:type_name -> google.container.v1.ShieldedInstanceConfig - 118, // 103: google.container.v1.SetLabelsRequest.resource_labels:type_name -> google.container.v1.SetLabelsRequest.ResourceLabelsEntry - 10, // 104: google.container.v1.WorkloadMetadataConfig.mode:type_name -> google.container.v1.WorkloadMetadataConfig.Mode - 35, // 105: google.container.v1.SetNetworkPolicyRequest.network_policy:type_name -> google.container.v1.NetworkPolicy - 69, // 106: google.container.v1.SetMaintenancePolicyRequest.maintenance_policy:type_name -> google.container.v1.MaintenancePolicy - 11, // 107: google.container.v1.StatusCondition.code:type_name -> google.container.v1.StatusCondition.Code - 108, // 108: google.container.v1.NetworkConfig.default_snat_status:type_name -> google.container.v1.DefaultSnatStatus - 95, // 109: google.container.v1.GetJSONWebKeysResponse.keys:type_name -> google.container.v1.Jwk - 12, // 110: google.container.v1.ReleaseChannel.channel:type_name -> google.container.v1.ReleaseChannel.Channel - 13, // 111: google.container.v1.DatabaseEncryption.state:type_name -> google.container.v1.DatabaseEncryption.State - 105, // 112: google.container.v1.ListUsableSubnetworksResponse.subnetworks:type_name -> google.container.v1.UsableSubnetwork - 14, // 113: google.container.v1.UsableSubnetworkSecondaryRange.status:type_name -> google.container.v1.UsableSubnetworkSecondaryRange.Status - 104, // 114: google.container.v1.UsableSubnetwork.secondary_ip_ranges:type_name -> google.container.v1.UsableSubnetworkSecondaryRange - 119, // 115: google.container.v1.ResourceUsageExportConfig.bigquery_destination:type_name -> google.container.v1.ResourceUsageExportConfig.BigQueryDestination - 120, // 116: google.container.v1.ResourceUsageExportConfig.consumption_metering_config:type_name -> google.container.v1.ResourceUsageExportConfig.ConsumptionMeteringConfig - 12, // 117: google.container.v1.ServerConfig.ReleaseChannelConfig.channel:type_name -> google.container.v1.ReleaseChannel.Channel - 71, // 118: google.container.v1.MaintenanceWindow.MaintenanceExclusionsEntry.value:type_name -> google.container.v1.TimeWindow - 54, // 119: google.container.v1.ClusterManager.ListClusters:input_type -> 
google.container.v1.ListClustersRequest - 43, // 120: google.container.v1.ClusterManager.GetCluster:input_type -> google.container.v1.GetClusterRequest - 42, // 121: google.container.v1.ClusterManager.CreateCluster:input_type -> google.container.v1.CreateClusterRequest - 44, // 122: google.container.v1.ClusterManager.UpdateCluster:input_type -> google.container.v1.UpdateClusterRequest - 45, // 123: google.container.v1.ClusterManager.UpdateNodePool:input_type -> google.container.v1.UpdateNodePoolRequest - 46, // 124: google.container.v1.ClusterManager.SetNodePoolAutoscaling:input_type -> google.container.v1.SetNodePoolAutoscalingRequest - 47, // 125: google.container.v1.ClusterManager.SetLoggingService:input_type -> google.container.v1.SetLoggingServiceRequest - 48, // 126: google.container.v1.ClusterManager.SetMonitoringService:input_type -> google.container.v1.SetMonitoringServiceRequest - 49, // 127: google.container.v1.ClusterManager.SetAddonsConfig:input_type -> google.container.v1.SetAddonsConfigRequest - 50, // 128: google.container.v1.ClusterManager.SetLocations:input_type -> google.container.v1.SetLocationsRequest - 51, // 129: google.container.v1.ClusterManager.UpdateMaster:input_type -> google.container.v1.UpdateMasterRequest - 52, // 130: google.container.v1.ClusterManager.SetMasterAuth:input_type -> google.container.v1.SetMasterAuthRequest - 53, // 131: google.container.v1.ClusterManager.DeleteCluster:input_type -> google.container.v1.DeleteClusterRequest - 57, // 132: google.container.v1.ClusterManager.ListOperations:input_type -> google.container.v1.ListOperationsRequest - 56, // 133: google.container.v1.ClusterManager.GetOperation:input_type -> google.container.v1.GetOperationRequest - 58, // 134: google.container.v1.ClusterManager.CancelOperation:input_type -> google.container.v1.CancelOperationRequest - 60, // 135: google.container.v1.ClusterManager.GetServerConfig:input_type -> google.container.v1.GetServerConfigRequest - 94, // 136: google.container.v1.ClusterManager.GetJSONWebKeys:input_type -> google.container.v1.GetJSONWebKeysRequest - 64, // 137: google.container.v1.ClusterManager.ListNodePools:input_type -> google.container.v1.ListNodePoolsRequest - 65, // 138: google.container.v1.ClusterManager.GetNodePool:input_type -> google.container.v1.GetNodePoolRequest - 62, // 139: google.container.v1.ClusterManager.CreateNodePool:input_type -> google.container.v1.CreateNodePoolRequest - 63, // 140: google.container.v1.ClusterManager.DeleteNodePool:input_type -> google.container.v1.DeleteNodePoolRequest - 76, // 141: google.container.v1.ClusterManager.RollbackNodePoolUpgrade:input_type -> google.container.v1.RollbackNodePoolUpgradeRequest - 74, // 142: google.container.v1.ClusterManager.SetNodePoolManagement:input_type -> google.container.v1.SetNodePoolManagementRequest - 82, // 143: google.container.v1.ClusterManager.SetLabels:input_type -> google.container.v1.SetLabelsRequest - 83, // 144: google.container.v1.ClusterManager.SetLegacyAbac:input_type -> google.container.v1.SetLegacyAbacRequest - 84, // 145: google.container.v1.ClusterManager.StartIPRotation:input_type -> google.container.v1.StartIPRotationRequest - 85, // 146: google.container.v1.ClusterManager.CompleteIPRotation:input_type -> google.container.v1.CompleteIPRotationRequest - 75, // 147: google.container.v1.ClusterManager.SetNodePoolSize:input_type -> google.container.v1.SetNodePoolSizeRequest - 88, // 148: google.container.v1.ClusterManager.SetNetworkPolicy:input_type -> 
google.container.v1.SetNetworkPolicyRequest - 89, // 149: google.container.v1.ClusterManager.SetMaintenancePolicy:input_type -> google.container.v1.SetMaintenancePolicyRequest - 102, // 150: google.container.v1.ClusterManager.ListUsableSubnetworks:input_type -> google.container.v1.ListUsableSubnetworksRequest - 55, // 151: google.container.v1.ClusterManager.ListClusters:output_type -> google.container.v1.ListClustersResponse - 38, // 152: google.container.v1.ClusterManager.GetCluster:output_type -> google.container.v1.Cluster - 40, // 153: google.container.v1.ClusterManager.CreateCluster:output_type -> google.container.v1.Operation - 40, // 154: google.container.v1.ClusterManager.UpdateCluster:output_type -> google.container.v1.Operation - 40, // 155: google.container.v1.ClusterManager.UpdateNodePool:output_type -> google.container.v1.Operation - 40, // 156: google.container.v1.ClusterManager.SetNodePoolAutoscaling:output_type -> google.container.v1.Operation - 40, // 157: google.container.v1.ClusterManager.SetLoggingService:output_type -> google.container.v1.Operation - 40, // 158: google.container.v1.ClusterManager.SetMonitoringService:output_type -> google.container.v1.Operation - 40, // 159: google.container.v1.ClusterManager.SetAddonsConfig:output_type -> google.container.v1.Operation - 40, // 160: google.container.v1.ClusterManager.SetLocations:output_type -> google.container.v1.Operation - 40, // 161: google.container.v1.ClusterManager.UpdateMaster:output_type -> google.container.v1.Operation - 40, // 162: google.container.v1.ClusterManager.SetMasterAuth:output_type -> google.container.v1.Operation - 40, // 163: google.container.v1.ClusterManager.DeleteCluster:output_type -> google.container.v1.Operation - 59, // 164: google.container.v1.ClusterManager.ListOperations:output_type -> google.container.v1.ListOperationsResponse - 40, // 165: google.container.v1.ClusterManager.GetOperation:output_type -> google.container.v1.Operation - 122, // 166: google.container.v1.ClusterManager.CancelOperation:output_type -> google.protobuf.Empty - 61, // 167: google.container.v1.ClusterManager.GetServerConfig:output_type -> google.container.v1.ServerConfig - 96, // 168: google.container.v1.ClusterManager.GetJSONWebKeys:output_type -> google.container.v1.GetJSONWebKeysResponse - 77, // 169: google.container.v1.ClusterManager.ListNodePools:output_type -> google.container.v1.ListNodePoolsResponse - 66, // 170: google.container.v1.ClusterManager.GetNodePool:output_type -> google.container.v1.NodePool - 40, // 171: google.container.v1.ClusterManager.CreateNodePool:output_type -> google.container.v1.Operation - 40, // 172: google.container.v1.ClusterManager.DeleteNodePool:output_type -> google.container.v1.Operation - 40, // 173: google.container.v1.ClusterManager.RollbackNodePoolUpgrade:output_type -> google.container.v1.Operation - 40, // 174: google.container.v1.ClusterManager.SetNodePoolManagement:output_type -> google.container.v1.Operation - 40, // 175: google.container.v1.ClusterManager.SetLabels:output_type -> google.container.v1.Operation - 40, // 176: google.container.v1.ClusterManager.SetLegacyAbac:output_type -> google.container.v1.Operation - 40, // 177: google.container.v1.ClusterManager.StartIPRotation:output_type -> google.container.v1.Operation - 40, // 178: google.container.v1.ClusterManager.CompleteIPRotation:output_type -> google.container.v1.Operation - 40, // 179: google.container.v1.ClusterManager.SetNodePoolSize:output_type -> google.container.v1.Operation - 40, // 180: 
google.container.v1.ClusterManager.SetNetworkPolicy:output_type -> google.container.v1.Operation - 40, // 181: google.container.v1.ClusterManager.SetMaintenancePolicy:output_type -> google.container.v1.Operation - 103, // 182: google.container.v1.ClusterManager.ListUsableSubnetworks:output_type -> google.container.v1.ListUsableSubnetworksResponse - 151, // [151:183] is the sub-list for method output_type - 119, // [119:151] is the sub-list for method input_type - 119, // [119:119] is the sub-list for extension type_name - 119, // [119:119] is the sub-list for extension extendee - 0, // [0:119] is the sub-list for field type_name -} - -func init() { file_google_container_v1_cluster_service_proto_init() } -func file_google_container_v1_cluster_service_proto_init() { - if File_google_container_v1_cluster_service_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_container_v1_cluster_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShieldedInstanceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SandboxConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReservationAffinity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeTaint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MasterAuth); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientCertificateConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddonsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpLoadBalancing); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*HorizontalPodAutoscaling); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KubernetesDashboard); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DnsCacheConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrivateClusterMasterGlobalAccessConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrivateClusterConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticatorGroupsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloudRunConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigConnectorConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MasterAuthorizedNetworksConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LegacyAbac); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BinaryAuthorization); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IPAllocationPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Cluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterUpdate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OperationProgress); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateNodePoolRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNodePoolAutoscalingRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetLoggingServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetMonitoringServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[34].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*SetAddonsConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetLocationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateMasterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetMasterAuthRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClustersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClustersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOperationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListOperationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelOperationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListOperationsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServerConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerConfig); i { - case 0: - return &v.state 
- case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNodePoolRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNodePoolRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNodePoolsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodePoolRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodePool); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeManagement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AutoUpgradeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MaintenancePolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MaintenanceWindow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeWindow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecurringTimeWindow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DailyMaintenanceWindow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
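The Exporter closures above and below exist only for the purego code path (when protoimpl.UnsafeEnabled is false); together with the goTypes and depIdxs tables earlier in this file they are handed to protoimpl.TypeBuilder at the end of this init, which registers the file descriptor with the global protobuf registry. Once any importer has triggered that registration, the descriptors can be looked up by proto path. A minimal sketch, assuming the vendored genproto import path and the canonical google/container/v1/cluster_service.proto file path:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	// Blank import so the generated init() runs and registers the file.
	// This import path is an assumption about where the vendored code lives.
	_ "google.golang.org/genproto/googleapis/container/v1"
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/container/v1/cluster_service.proto")
	if err != nil {
		panic(err)
	}
	msgs := fd.Messages()
	for i := 0; i < msgs.Len(); i++ {
		fmt.Println(msgs.Get(i).FullName()) // e.g. google.container.v1.Cluster
	}
}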
file_google_container_v1_cluster_service_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNodePoolManagementRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNodePoolSizeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RollbackNodePoolUpgradeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNodePoolsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterAutoscaling); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AutoprovisioningNodePoolDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceLimit); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodePoolAutoscaling); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetLabelsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetLegacyAbacRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartIPRotationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompleteIPRotationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[71].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*AcceleratorConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkloadMetadataConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNetworkPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetMaintenancePolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusCondition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpenIDConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpenIDConfigResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetJSONWebKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Jwk); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetJSONWebKeysResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReleaseChannel); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IntraNodeVisibilityConfig); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MaxPodsConstraint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkloadIdentityConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DatabaseEncryption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListUsableSubnetworksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListUsableSubnetworksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UsableSubnetworkSecondaryRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UsableSubnetwork); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceUsageExportConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerticalPodAutoscaling); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DefaultSnatStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShieldedNodes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MasterAuthorizedNetworksConfig_CidrBlock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OperationProgress_Metric); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerConfig_ReleaseChannelConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodePool_UpgradeSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceUsageExportConfig_BigQueryDestination); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_container_v1_cluster_service_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceUsageExportConfig_ConsumptionMeteringConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_container_v1_cluster_service_proto_msgTypes[55].OneofWrappers = []interface{}{ - (*MaintenanceWindow_DailyMaintenanceWindow)(nil), - (*MaintenanceWindow_RecurringWindow)(nil), - } - file_google_container_v1_cluster_service_proto_msgTypes[99].OneofWrappers = []interface{}{ - (*OperationProgress_Metric_IntValue)(nil), - (*OperationProgress_Metric_DoubleValue)(nil), - (*OperationProgress_Metric_StringValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_container_v1_cluster_service_proto_rawDesc, - NumEnums: 15, - NumMessages: 106, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_container_v1_cluster_service_proto_goTypes, - DependencyIndexes: file_google_container_v1_cluster_service_proto_depIdxs, - EnumInfos: file_google_container_v1_cluster_service_proto_enumTypes, - MessageInfos: file_google_container_v1_cluster_service_proto_msgTypes, - }.Build() - File_google_container_v1_cluster_service_proto = out.File - file_google_container_v1_cluster_service_proto_rawDesc = nil - file_google_container_v1_cluster_service_proto_goTypes = nil - file_google_container_v1_cluster_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// ClusterManagerClient is the client API for ClusterManager service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ClusterManagerClient interface { - // Lists all clusters owned by a project in either the specified zone or all - // zones. 
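The OneofWrappers registered in the init above declare the Go wrapper structs behind the two oneof fields in this file: MaintenanceWindow's window and OperationProgress_Metric's value. In application code a oneof is populated by assigning exactly one wrapper and read back with a type switch over the wrapper types. A minimal sketch using the generated types from this file (it assumes the fragment sits in, or imports, this package plus "fmt"; the function and metric names are illustrative):

func exampleMetric() string {
	// Populate the oneof by assigning exactly one wrapper value.
	m := &OperationProgress_Metric{
		Name:  "pods_scheduled", // illustrative metric name
		Value: &OperationProgress_Metric_IntValue{IntValue: 3},
	}
	// Read it back with a type switch over the generated wrapper types.
	switch v := m.GetValue().(type) {
	case *OperationProgress_Metric_IntValue:
		return fmt.Sprintf("%s=%d", m.GetName(), v.IntValue)
	case *OperationProgress_Metric_DoubleValue:
		return fmt.Sprintf("%s=%g", m.GetName(), v.DoubleValue)
	case *OperationProgress_Metric_StringValue:
		return fmt.Sprintf("%s=%q", m.GetName(), v.StringValue)
	default:
		return m.GetName() + "=<unset>"
	}
}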
- ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) - // Gets the details of a specific cluster. - GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) - // Creates a cluster, consisting of the specified number and type of Google - // Compute Engine instances. - // - // By default, the cluster is created in the project's - // [default - // network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). - // - // One firewall is added for the cluster. After cluster creation, - // the Kubelet creates routes for each node to allow the containers - // on that node to communicate with all other instances in the - // cluster. - // - // Finally, an entry is added to the project's global metadata indicating - // which CIDR range the cluster is using. - CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) - // Updates the settings of a specific cluster. - UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) - // Updates the version and/or image type for the specified node pool. - UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the autoscaling settings for the specified node pool. - SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the logging service for a specific cluster. - SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the monitoring service for a specific cluster. - SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the addons for a specific cluster. - SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) - // Deprecated: Do not use. - // Sets the locations for a specific cluster. - // Deprecated. Use - // [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) - // instead. - SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) - // Updates the master for a specific cluster. - UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets master auth materials. Currently supports changing the admin password - // or a specific cluster, either via password generation or explicitly setting - // the password. - SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) - // Deletes the cluster, including the Kubernetes endpoint and all worker - // nodes. - // - // Firewalls and routes that were configured during cluster creation - // are also deleted. - // - // Other Google Compute Engine resources that might be in use by the cluster, - // such as load balancer resources, are not deleted if they weren't present - // when the cluster was initially created. - DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) - // Lists all operations in a project in a specific zone or all zones. 
- ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) - // Gets the specified operation. - GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) - // Cancels the specified operation. - CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Returns configuration info about the Google Kubernetes Engine service. - GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) - // Gets the public component of the cluster signing keys in - // JSON Web Key format. - // This API is not yet intended for general use, and is not available for all - // clusters. - GetJSONWebKeys(ctx context.Context, in *GetJSONWebKeysRequest, opts ...grpc.CallOption) (*GetJSONWebKeysResponse, error) - // Lists the node pools for a cluster. - ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) - // Retrieves the requested node pool. - GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) - // Creates a node pool for a cluster. - CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) - // Deletes a node pool from a cluster. - DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) - // Rolls back a previously Aborted or Failed NodePool upgrade. - // This makes no changes if the last upgrade successfully completed. - RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the NodeManagement options for a node pool. - SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets labels on a cluster. - SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) - // Enables or disables the ABAC authorization mechanism on a cluster. - SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) - // Starts master IP rotation. - StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) - // Completes master IP rotation. - CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the size for a specific node pool. - SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) - // Enables or disables Network Policy for a cluster. - SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) - // Sets the maintenance policy for a cluster. - SetMaintenancePolicy(ctx context.Context, in *SetMaintenancePolicyRequest, opts ...grpc.CallOption) (*Operation, error) - // Lists subnetworks that are usable for creating clusters in a project. 
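Nearly every mutating RPC declared in this interface returns a long-running *Operation rather than the final resource; callers are expected to poll GetOperation until the status reaches DONE. A minimal sketch of that loop (the resource-name format, the 5-second interval, and the helper name are illustrative; it assumes the fragment lives alongside, or imports, these generated types plus context/fmt/time, and most real callers use the higher-level cloud.google.com/go/container/apiv1 wrapper instead):

// waitForOperation blocks until op reaches DONE or ctx is cancelled.
func waitForOperation(ctx context.Context, c ClusterManagerClient, project, location string, op *Operation) error {
	name := fmt.Sprintf("projects/%s/locations/%s/operations/%s", project, location, op.GetName())
	for {
		cur, err := c.GetOperation(ctx, &GetOperationRequest{Name: name})
		if err != nil {
			return err
		}
		if cur.GetStatus() == Operation_DONE {
			if msg := cur.GetStatusMessage(); msg != "" {
				return fmt.Errorf("operation %s failed: %s", name, msg)
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second):
		}
	}
}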
- ListUsableSubnetworks(ctx context.Context, in *ListUsableSubnetworksRequest, opts ...grpc.CallOption) (*ListUsableSubnetworksResponse, error) -} - -type clusterManagerClient struct { - cc grpc.ClientConnInterface -} - -func NewClusterManagerClient(cc grpc.ClientConnInterface) ClusterManagerClient { - return &clusterManagerClient{cc} -} - -func (c *clusterManagerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { - out := new(ListClustersResponse) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListClusters", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { - out := new(Cluster) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetCluster", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateCluster", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateCluster", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateNodePool", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLoggingService", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMonitoringService", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetAddonsConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Deprecated: Do not use. 
-func (c *clusterManagerClient) SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLocations", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateMaster", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMasterAuth", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteCluster", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) { - out := new(ListOperationsResponse) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListOperations", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetOperation", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CancelOperation", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) { - out := new(ServerConfig) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetServerConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) GetJSONWebKeys(ctx context.Context, in *GetJSONWebKeysRequest, opts ...grpc.CallOption) (*GetJSONWebKeysResponse, error) { - out := new(GetJSONWebKeysResponse) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetJSONWebKeys", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) { - out := new(ListNodePoolsResponse) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListNodePools", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) { - out := new(NodePool) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetNodePool", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateNodePool", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteNodePool", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolManagement", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLabels", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLegacyAbac", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/StartIPRotation", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CompleteIPRotation", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolSize", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNetworkPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) SetMaintenancePolicy(ctx context.Context, in *SetMaintenancePolicyRequest, opts ...grpc.CallOption) (*Operation, error) { - out := new(Operation) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMaintenancePolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterManagerClient) ListUsableSubnetworks(ctx context.Context, in *ListUsableSubnetworksRequest, opts ...grpc.CallOption) (*ListUsableSubnetworksResponse, error) { - out := new(ListUsableSubnetworksResponse) - err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListUsableSubnetworks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ClusterManagerServer is the server API for ClusterManager service. -type ClusterManagerServer interface { - // Lists all clusters owned by a project in either the specified zone or all - // zones. - ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) - // Gets the details of a specific cluster. - GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) - // Creates a cluster, consisting of the specified number and type of Google - // Compute Engine instances. - // - // By default, the cluster is created in the project's - // [default - // network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). - // - // One firewall is added for the cluster. After cluster creation, - // the Kubelet creates routes for each node to allow the containers - // on that node to communicate with all other instances in the - // cluster. - // - // Finally, an entry is added to the project's global metadata indicating - // which CIDR range the cluster is using. - CreateCluster(context.Context, *CreateClusterRequest) (*Operation, error) - // Updates the settings of a specific cluster. - UpdateCluster(context.Context, *UpdateClusterRequest) (*Operation, error) - // Updates the version and/or image type for the specified node pool. - UpdateNodePool(context.Context, *UpdateNodePoolRequest) (*Operation, error) - // Sets the autoscaling settings for the specified node pool. - SetNodePoolAutoscaling(context.Context, *SetNodePoolAutoscalingRequest) (*Operation, error) - // Sets the logging service for a specific cluster. - SetLoggingService(context.Context, *SetLoggingServiceRequest) (*Operation, error) - // Sets the monitoring service for a specific cluster. - SetMonitoringService(context.Context, *SetMonitoringServiceRequest) (*Operation, error) - // Sets the addons for a specific cluster. - SetAddonsConfig(context.Context, *SetAddonsConfigRequest) (*Operation, error) - // Deprecated: Do not use. - // Sets the locations for a specific cluster. - // Deprecated. Use - // [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) - // instead. - SetLocations(context.Context, *SetLocationsRequest) (*Operation, error) - // Updates the master for a specific cluster. - UpdateMaster(context.Context, *UpdateMasterRequest) (*Operation, error) - // Sets master auth materials. 
Currently supports changing the admin password - // or a specific cluster, either via password generation or explicitly setting - // the password. - SetMasterAuth(context.Context, *SetMasterAuthRequest) (*Operation, error) - // Deletes the cluster, including the Kubernetes endpoint and all worker - // nodes. - // - // Firewalls and routes that were configured during cluster creation - // are also deleted. - // - // Other Google Compute Engine resources that might be in use by the cluster, - // such as load balancer resources, are not deleted if they weren't present - // when the cluster was initially created. - DeleteCluster(context.Context, *DeleteClusterRequest) (*Operation, error) - // Lists all operations in a project in a specific zone or all zones. - ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) - // Gets the specified operation. - GetOperation(context.Context, *GetOperationRequest) (*Operation, error) - // Cancels the specified operation. - CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) - // Returns configuration info about the Google Kubernetes Engine service. - GetServerConfig(context.Context, *GetServerConfigRequest) (*ServerConfig, error) - // Gets the public component of the cluster signing keys in - // JSON Web Key format. - // This API is not yet intended for general use, and is not available for all - // clusters. - GetJSONWebKeys(context.Context, *GetJSONWebKeysRequest) (*GetJSONWebKeysResponse, error) - // Lists the node pools for a cluster. - ListNodePools(context.Context, *ListNodePoolsRequest) (*ListNodePoolsResponse, error) - // Retrieves the requested node pool. - GetNodePool(context.Context, *GetNodePoolRequest) (*NodePool, error) - // Creates a node pool for a cluster. - CreateNodePool(context.Context, *CreateNodePoolRequest) (*Operation, error) - // Deletes a node pool from a cluster. - DeleteNodePool(context.Context, *DeleteNodePoolRequest) (*Operation, error) - // Rolls back a previously Aborted or Failed NodePool upgrade. - // This makes no changes if the last upgrade successfully completed. - RollbackNodePoolUpgrade(context.Context, *RollbackNodePoolUpgradeRequest) (*Operation, error) - // Sets the NodeManagement options for a node pool. - SetNodePoolManagement(context.Context, *SetNodePoolManagementRequest) (*Operation, error) - // Sets labels on a cluster. - SetLabels(context.Context, *SetLabelsRequest) (*Operation, error) - // Enables or disables the ABAC authorization mechanism on a cluster. - SetLegacyAbac(context.Context, *SetLegacyAbacRequest) (*Operation, error) - // Starts master IP rotation. - StartIPRotation(context.Context, *StartIPRotationRequest) (*Operation, error) - // Completes master IP rotation. - CompleteIPRotation(context.Context, *CompleteIPRotationRequest) (*Operation, error) - // Sets the size for a specific node pool. - SetNodePoolSize(context.Context, *SetNodePoolSizeRequest) (*Operation, error) - // Enables or disables Network Policy for a cluster. - SetNetworkPolicy(context.Context, *SetNetworkPolicyRequest) (*Operation, error) - // Sets the maintenance policy for a cluster. - SetMaintenancePolicy(context.Context, *SetMaintenancePolicyRequest) (*Operation, error) - // Lists subnetworks that are usable for creating clusters in a project. - ListUsableSubnetworks(context.Context, *ListUsableSubnetworksRequest) (*ListUsableSubnetworksResponse, error) -} - -// UnimplementedClusterManagerServer can be embedded to have forward compatible implementations. 
-type UnimplementedClusterManagerServer struct { -} - -func (*UnimplementedClusterManagerServer) ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListClusters not implemented") -} -func (*UnimplementedClusterManagerServer) GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCluster not implemented") -} -func (*UnimplementedClusterManagerServer) CreateCluster(context.Context, *CreateClusterRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCluster not implemented") -} -func (*UnimplementedClusterManagerServer) UpdateCluster(context.Context, *UpdateClusterRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateCluster not implemented") -} -func (*UnimplementedClusterManagerServer) UpdateNodePool(context.Context, *UpdateNodePoolRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateNodePool not implemented") -} -func (*UnimplementedClusterManagerServer) SetNodePoolAutoscaling(context.Context, *SetNodePoolAutoscalingRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetNodePoolAutoscaling not implemented") -} -func (*UnimplementedClusterManagerServer) SetLoggingService(context.Context, *SetLoggingServiceRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetLoggingService not implemented") -} -func (*UnimplementedClusterManagerServer) SetMonitoringService(context.Context, *SetMonitoringServiceRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetMonitoringService not implemented") -} -func (*UnimplementedClusterManagerServer) SetAddonsConfig(context.Context, *SetAddonsConfigRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetAddonsConfig not implemented") -} -func (*UnimplementedClusterManagerServer) SetLocations(context.Context, *SetLocationsRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetLocations not implemented") -} -func (*UnimplementedClusterManagerServer) UpdateMaster(context.Context, *UpdateMasterRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateMaster not implemented") -} -func (*UnimplementedClusterManagerServer) SetMasterAuth(context.Context, *SetMasterAuthRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetMasterAuth not implemented") -} -func (*UnimplementedClusterManagerServer) DeleteCluster(context.Context, *DeleteClusterRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteCluster not implemented") -} -func (*UnimplementedClusterManagerServer) ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListOperations not implemented") -} -func (*UnimplementedClusterManagerServer) GetOperation(context.Context, *GetOperationRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOperation not implemented") -} -func (*UnimplementedClusterManagerServer) CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CancelOperation not implemented") -} -func 
(*UnimplementedClusterManagerServer) GetServerConfig(context.Context, *GetServerConfigRequest) (*ServerConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetServerConfig not implemented") -} -func (*UnimplementedClusterManagerServer) GetJSONWebKeys(context.Context, *GetJSONWebKeysRequest) (*GetJSONWebKeysResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetJSONWebKeys not implemented") -} -func (*UnimplementedClusterManagerServer) ListNodePools(context.Context, *ListNodePoolsRequest) (*ListNodePoolsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNodePools not implemented") -} -func (*UnimplementedClusterManagerServer) GetNodePool(context.Context, *GetNodePoolRequest) (*NodePool, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNodePool not implemented") -} -func (*UnimplementedClusterManagerServer) CreateNodePool(context.Context, *CreateNodePoolRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNodePool not implemented") -} -func (*UnimplementedClusterManagerServer) DeleteNodePool(context.Context, *DeleteNodePoolRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNodePool not implemented") -} -func (*UnimplementedClusterManagerServer) RollbackNodePoolUpgrade(context.Context, *RollbackNodePoolUpgradeRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method RollbackNodePoolUpgrade not implemented") -} -func (*UnimplementedClusterManagerServer) SetNodePoolManagement(context.Context, *SetNodePoolManagementRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetNodePoolManagement not implemented") -} -func (*UnimplementedClusterManagerServer) SetLabels(context.Context, *SetLabelsRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetLabels not implemented") -} -func (*UnimplementedClusterManagerServer) SetLegacyAbac(context.Context, *SetLegacyAbacRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetLegacyAbac not implemented") -} -func (*UnimplementedClusterManagerServer) StartIPRotation(context.Context, *StartIPRotationRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartIPRotation not implemented") -} -func (*UnimplementedClusterManagerServer) CompleteIPRotation(context.Context, *CompleteIPRotationRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method CompleteIPRotation not implemented") -} -func (*UnimplementedClusterManagerServer) SetNodePoolSize(context.Context, *SetNodePoolSizeRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetNodePoolSize not implemented") -} -func (*UnimplementedClusterManagerServer) SetNetworkPolicy(context.Context, *SetNetworkPolicyRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetNetworkPolicy not implemented") -} -func (*UnimplementedClusterManagerServer) SetMaintenancePolicy(context.Context, *SetMaintenancePolicyRequest) (*Operation, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetMaintenancePolicy not implemented") -} -func (*UnimplementedClusterManagerServer) ListUsableSubnetworks(context.Context, *ListUsableSubnetworksRequest) (*ListUsableSubnetworksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListUsableSubnetworks not implemented") -} - 
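For orientation, a minimal sketch of how the generated server plumbing above is typically consumed, assuming the vendored package is imported under the alias container and served with google.golang.org/grpc. The fakeClusterManager type, the listen address, and the stubbed ListClusters body are illustrative assumptions and are not part of the vendored code; embedding UnimplementedClusterManagerServer keeps the implementation forward compatible, as the generated comment notes, so every method that is not overridden returns codes.Unimplemented.

package main

import (
	"context"
	"log"
	"net"

	container "google.golang.org/genproto/googleapis/container/v1" // assumed import alias for this vendored package
	"google.golang.org/grpc"
)

// fakeClusterManager overrides a single RPC; all other ClusterManager methods
// fall through to the embedded UnimplementedClusterManagerServer.
type fakeClusterManager struct {
	container.UnimplementedClusterManagerServer
}

// ListClusters returns an empty response; a real implementation would consult
// its own cluster state. Signature matches the ClusterManagerServer interface.
func (s *fakeClusterManager) ListClusters(ctx context.Context, req *container.ListClustersRequest) (*container.ListClustersResponse, error) {
	return &container.ListClustersResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:10000") // illustrative address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	// RegisterClusterManagerServer wires the implementation into the
	// generated service descriptor (_ClusterManager_serviceDesc).
	container.RegisterClusterManagerServer(srv, &fakeClusterManager{})
	log.Fatal(srv.Serve(lis))
}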
-func RegisterClusterManagerServer(s *grpc.Server, srv ClusterManagerServer) { - s.RegisterService(&_ClusterManager_serviceDesc, srv) -} - -func _ClusterManager_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListClustersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).ListClusters(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/ListClusters", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).ListClusters(ctx, req.(*ListClustersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).GetCluster(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/GetCluster", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).GetCluster(ctx, req.(*GetClusterRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).CreateCluster(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/CreateCluster", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).CreateCluster(ctx, req.(*CreateClusterRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).UpdateCluster(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/UpdateCluster", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_UpdateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateNodePoolRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).UpdateNodePool(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/UpdateNodePool", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).UpdateNodePool(ctx, req.(*UpdateNodePoolRequest)) - } - return interceptor(ctx, in, 
info, handler) -} - -func _ClusterManager_SetNodePoolAutoscaling_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodePoolAutoscalingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, req.(*SetNodePoolAutoscalingRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetLoggingService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetLoggingServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetLoggingService(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetLoggingService", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetLoggingService(ctx, req.(*SetLoggingServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetMonitoringService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetMonitoringServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetMonitoringService(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetMonitoringService", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetMonitoringService(ctx, req.(*SetMonitoringServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetAddonsConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetAddonsConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetAddonsConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetAddonsConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetAddonsConfig(ctx, req.(*SetAddonsConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetLocationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetLocations(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetLocations", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetLocations(ctx, req.(*SetLocationsRequest)) - } - return 
interceptor(ctx, in, info, handler) -} - -func _ClusterManager_UpdateMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateMasterRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).UpdateMaster(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/UpdateMaster", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).UpdateMaster(ctx, req.(*UpdateMasterRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetMasterAuth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetMasterAuthRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetMasterAuth(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetMasterAuth", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetMasterAuth(ctx, req.(*SetMasterAuthRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).DeleteCluster(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/DeleteCluster", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListOperationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).ListOperations(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/ListOperations", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).ListOperations(ctx, req.(*ListOperationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOperationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).GetOperation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/GetOperation", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).GetOperation(ctx, req.(*GetOperationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_CancelOperation_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelOperationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).CancelOperation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/CancelOperation", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).CancelOperation(ctx, req.(*CancelOperationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_GetServerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServerConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).GetServerConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/GetServerConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).GetServerConfig(ctx, req.(*GetServerConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_GetJSONWebKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetJSONWebKeysRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).GetJSONWebKeys(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/GetJSONWebKeys", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).GetJSONWebKeys(ctx, req.(*GetJSONWebKeysRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_ListNodePools_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNodePoolsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).ListNodePools(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/ListNodePools", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).ListNodePools(ctx, req.(*ListNodePoolsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_GetNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodePoolRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).GetNodePool(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/GetNodePool", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).GetNodePool(ctx, req.(*GetNodePoolRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_CreateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(CreateNodePoolRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).CreateNodePool(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/CreateNodePool", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).CreateNodePool(ctx, req.(*CreateNodePoolRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_DeleteNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNodePoolRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).DeleteNodePool(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/DeleteNodePool", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).DeleteNodePool(ctx, req.(*DeleteNodePoolRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_RollbackNodePoolUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RollbackNodePoolUpgradeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, req.(*RollbackNodePoolUpgradeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetNodePoolManagement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodePoolManagementRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetNodePoolManagement", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, req.(*SetNodePoolManagementRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetLabels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetLabelsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetLabels(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetLabels", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetLabels(ctx, req.(*SetLabelsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetLegacyAbac_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(SetLegacyAbacRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetLegacyAbac(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetLegacyAbac", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetLegacyAbac(ctx, req.(*SetLegacyAbacRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_StartIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartIPRotationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).StartIPRotation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/StartIPRotation", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).StartIPRotation(ctx, req.(*StartIPRotationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_CompleteIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CompleteIPRotationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).CompleteIPRotation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/CompleteIPRotation", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).CompleteIPRotation(ctx, req.(*CompleteIPRotationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetNodePoolSize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodePoolSizeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetNodePoolSize(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetNodePoolSize", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetNodePoolSize(ctx, req.(*SetNodePoolSizeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetNetworkPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNetworkPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetNetworkPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, req.(*SetNetworkPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_SetMaintenancePolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - 
in := new(SetMaintenancePolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).SetMaintenancePolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/SetMaintenancePolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).SetMaintenancePolicy(ctx, req.(*SetMaintenancePolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ClusterManager_ListUsableSubnetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListUsableSubnetworksRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterManagerServer).ListUsableSubnetworks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.container.v1.ClusterManager/ListUsableSubnetworks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterManagerServer).ListUsableSubnetworks(ctx, req.(*ListUsableSubnetworksRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ClusterManager_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.container.v1.ClusterManager", - HandlerType: (*ClusterManagerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListClusters", - Handler: _ClusterManager_ListClusters_Handler, - }, - { - MethodName: "GetCluster", - Handler: _ClusterManager_GetCluster_Handler, - }, - { - MethodName: "CreateCluster", - Handler: _ClusterManager_CreateCluster_Handler, - }, - { - MethodName: "UpdateCluster", - Handler: _ClusterManager_UpdateCluster_Handler, - }, - { - MethodName: "UpdateNodePool", - Handler: _ClusterManager_UpdateNodePool_Handler, - }, - { - MethodName: "SetNodePoolAutoscaling", - Handler: _ClusterManager_SetNodePoolAutoscaling_Handler, - }, - { - MethodName: "SetLoggingService", - Handler: _ClusterManager_SetLoggingService_Handler, - }, - { - MethodName: "SetMonitoringService", - Handler: _ClusterManager_SetMonitoringService_Handler, - }, - { - MethodName: "SetAddonsConfig", - Handler: _ClusterManager_SetAddonsConfig_Handler, - }, - { - MethodName: "SetLocations", - Handler: _ClusterManager_SetLocations_Handler, - }, - { - MethodName: "UpdateMaster", - Handler: _ClusterManager_UpdateMaster_Handler, - }, - { - MethodName: "SetMasterAuth", - Handler: _ClusterManager_SetMasterAuth_Handler, - }, - { - MethodName: "DeleteCluster", - Handler: _ClusterManager_DeleteCluster_Handler, - }, - { - MethodName: "ListOperations", - Handler: _ClusterManager_ListOperations_Handler, - }, - { - MethodName: "GetOperation", - Handler: _ClusterManager_GetOperation_Handler, - }, - { - MethodName: "CancelOperation", - Handler: _ClusterManager_CancelOperation_Handler, - }, - { - MethodName: "GetServerConfig", - Handler: _ClusterManager_GetServerConfig_Handler, - }, - { - MethodName: "GetJSONWebKeys", - Handler: _ClusterManager_GetJSONWebKeys_Handler, - }, - { - MethodName: "ListNodePools", - Handler: _ClusterManager_ListNodePools_Handler, - }, - { - MethodName: "GetNodePool", - Handler: _ClusterManager_GetNodePool_Handler, - }, - { - MethodName: "CreateNodePool", - Handler: _ClusterManager_CreateNodePool_Handler, - }, - { - MethodName: "DeleteNodePool", - Handler: _ClusterManager_DeleteNodePool_Handler, - }, - { - MethodName: "RollbackNodePoolUpgrade", - Handler: 
_ClusterManager_RollbackNodePoolUpgrade_Handler, - }, - { - MethodName: "SetNodePoolManagement", - Handler: _ClusterManager_SetNodePoolManagement_Handler, - }, - { - MethodName: "SetLabels", - Handler: _ClusterManager_SetLabels_Handler, - }, - { - MethodName: "SetLegacyAbac", - Handler: _ClusterManager_SetLegacyAbac_Handler, - }, - { - MethodName: "StartIPRotation", - Handler: _ClusterManager_StartIPRotation_Handler, - }, - { - MethodName: "CompleteIPRotation", - Handler: _ClusterManager_CompleteIPRotation_Handler, - }, - { - MethodName: "SetNodePoolSize", - Handler: _ClusterManager_SetNodePoolSize_Handler, - }, - { - MethodName: "SetNetworkPolicy", - Handler: _ClusterManager_SetNetworkPolicy_Handler, - }, - { - MethodName: "SetMaintenancePolicy", - Handler: _ClusterManager_SetMaintenancePolicy_Handler, - }, - { - MethodName: "ListUsableSubnetworks", - Handler: _ClusterManager_ListUsableSubnetworks_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/container/v1/cluster_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go deleted file mode 100644 index b86571d53c..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go +++ /dev/null @@ -1,1963 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/devtools/cloudtrace/v2/trace.proto - -package cloudtrace - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - status "google.golang.org/genproto/googleapis/rpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind.INTERNAL to be default. - Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span is used internally. Default value. 
- Span_INTERNAL Span_SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SERVER Span_SpanKind = 2 - // Indicates that the span covers the client-side wrapper around an RPC or - // other remote request. - Span_CLIENT Span_SpanKind = 3 - // Indicates that the span describes producer sending a message to a broker. - // Unlike client and server, there is no direct critical path latency - // relationship between producer and consumer spans (e.g. publishing a - // message to a pubsub service). - Span_PRODUCER Span_SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a - // broker. Unlike client and server, there is no direct critical path - // latency relationship between producer and consumer spans (e.g. receiving - // a message from a pubsub service subscription). - Span_CONSUMER Span_SpanKind = 5 -) - -// Enum value maps for Span_SpanKind. -var ( - Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "INTERNAL", - 2: "SERVER", - 3: "CLIENT", - 4: "PRODUCER", - 5: "CONSUMER", - } - Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "INTERNAL": 1, - "SERVER": 2, - "CLIENT": 3, - "PRODUCER": 4, - "CONSUMER": 5, - } -) - -func (x Span_SpanKind) Enum() *Span_SpanKind { - p := new(Span_SpanKind) - *p = x - return p -} - -func (x Span_SpanKind) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor { - return file_google_devtools_cloudtrace_v2_trace_proto_enumTypes[0].Descriptor() -} - -func (Span_SpanKind) Type() protoreflect.EnumType { - return &file_google_devtools_cloudtrace_v2_trace_proto_enumTypes[0] -} - -func (x Span_SpanKind) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Span_SpanKind.Descriptor instead. -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 0} -} - -// Indicates whether the message was sent or received. -type Span_TimeEvent_MessageEvent_Type int32 - -const ( - // Unknown event type. - Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 - // Indicates a sent message. - Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 - // Indicates a received message. - Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 -) - -// Enum value maps for Span_TimeEvent_MessageEvent_Type. 
-var ( - Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "SENT", - 2: "RECEIVED", - } - Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "SENT": 1, - "RECEIVED": 2, - } -) - -func (x Span_TimeEvent_MessageEvent_Type) Enum() *Span_TimeEvent_MessageEvent_Type { - p := new(Span_TimeEvent_MessageEvent_Type) - *p = x - return p -} - -func (x Span_TimeEvent_MessageEvent_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Span_TimeEvent_MessageEvent_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_devtools_cloudtrace_v2_trace_proto_enumTypes[1].Descriptor() -} - -func (Span_TimeEvent_MessageEvent_Type) Type() protoreflect.EnumType { - return &file_google_devtools_cloudtrace_v2_trace_proto_enumTypes[1] -} - -func (x Span_TimeEvent_MessageEvent_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Span_TimeEvent_MessageEvent_Type.Descriptor instead. -func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 1, 1, 0} -} - -// The relationship of the current span relative to the linked span: child, -// parent, or unspecified. -type Span_Link_Type int32 - -const ( - // The relationship of the two spans is unknown. - Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 - // The linked span is a child of the current span. - Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 - // The linked span is a parent of the current span. - Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 -) - -// Enum value maps for Span_Link_Type. -var ( - Span_Link_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "CHILD_LINKED_SPAN", - 2: "PARENT_LINKED_SPAN", - } - Span_Link_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "CHILD_LINKED_SPAN": 1, - "PARENT_LINKED_SPAN": 2, - } -) - -func (x Span_Link_Type) Enum() *Span_Link_Type { - p := new(Span_Link_Type) - *p = x - return p -} - -func (x Span_Link_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Span_Link_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_devtools_cloudtrace_v2_trace_proto_enumTypes[2].Descriptor() -} - -func (Span_Link_Type) Type() protoreflect.EnumType { - return &file_google_devtools_cloudtrace_v2_trace_proto_enumTypes[2] -} - -func (x Span_Link_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Span_Link_Type.Descriptor instead. -func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 3, 0} -} - -// A span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Often, a trace contains a root span -// that describes the end-to-end latency, and one or more subspans for -// its sub-operations. A trace can also contain multiple root spans, -// or none at all. Spans do not need to be contiguous—there may be -// gaps or overlaps between spans in a trace. -type Span struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. 
The resource name of the span in the following format: - // - // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] - // - // [TRACE_ID] is a unique identifier for a trace within a project; - // it is a 32-character hexadecimal encoding of a 16-byte array. - // - // [SPAN_ID] is a unique identifier for a span within a trace; it - // is a 16-character hexadecimal encoding of an 8-byte array. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The [SPAN_ID] portion of the span's resource name. - SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The [SPAN_ID] of this span's parent span. If this is a root span, - // then this field must be empty. - ParentSpanId string `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` - // Required. A description of the span's operation (up to 128 bytes). - // Stackdriver Trace displays the description in the - // Google Cloud Platform Console. - // For example, the display name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name within an application and at the same call point. - // This makes it easier to correlate spans in different traces. - DisplayName *TruncatableString `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // Required. The start time of the span. On the client side, this is the time kept by - // the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // Required. The end time of the span. On the client side, this is the time kept by - // the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - EndTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // A set of attributes on the span. You can have up to 32 attributes per - // span. - Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` - // Stack trace captured at the start of the span. - StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` - // A set of time events. You can have up to 32 annotations and 128 message - // events per span. - TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` - // Links associated with the span. You can have up to 128 links per Span. - Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` - // Optional. The final status for this span. - Status *status.Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` - // Optional. Set this parameter to indicate whether this span is in - // the same process as its parent. If you do not set this parameter, - // Stackdriver Trace is unable to take advantage of this helpful - // information. - SameProcessAsParentSpan *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` - // Optional. 
The number of child spans that were generated while this span - // was active. If set, allows implementation to detect missing child spans. - ChildSpanCount *wrapperspb.Int32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` - // Optional. Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify an RPC call. - SpanKind Span_SpanKind `protobuf:"varint,14,opt,name=span_kind,json=spanKind,proto3,enum=google.devtools.cloudtrace.v2.Span_SpanKind" json:"span_kind,omitempty"` -} - -func (x *Span) Reset() { - *x = Span{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span) ProtoMessage() {} - -func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span.ProtoReflect.Descriptor instead. -func (*Span) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0} -} - -func (x *Span) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Span) GetSpanId() string { - if x != nil { - return x.SpanId - } - return "" -} - -func (x *Span) GetParentSpanId() string { - if x != nil { - return x.ParentSpanId - } - return "" -} - -func (x *Span) GetDisplayName() *TruncatableString { - if x != nil { - return x.DisplayName - } - return nil -} - -func (x *Span) GetStartTime() *timestamppb.Timestamp { - if x != nil { - return x.StartTime - } - return nil -} - -func (x *Span) GetEndTime() *timestamppb.Timestamp { - if x != nil { - return x.EndTime - } - return nil -} - -func (x *Span) GetAttributes() *Span_Attributes { - if x != nil { - return x.Attributes - } - return nil -} - -func (x *Span) GetStackTrace() *StackTrace { - if x != nil { - return x.StackTrace - } - return nil -} - -func (x *Span) GetTimeEvents() *Span_TimeEvents { - if x != nil { - return x.TimeEvents - } - return nil -} - -func (x *Span) GetLinks() *Span_Links { - if x != nil { - return x.Links - } - return nil -} - -func (x *Span) GetStatus() *status.Status { - if x != nil { - return x.Status - } - return nil -} - -func (x *Span) GetSameProcessAsParentSpan() *wrapperspb.BoolValue { - if x != nil { - return x.SameProcessAsParentSpan - } - return nil -} - -func (x *Span) GetChildSpanCount() *wrapperspb.Int32Value { - if x != nil { - return x.ChildSpanCount - } - return nil -} - -func (x *Span) GetSpanKind() Span_SpanKind { - if x != nil { - return x.SpanKind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -// The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute. -type AttributeValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of the value. 
- // - // Types that are assignable to Value: - // *AttributeValue_StringValue - // *AttributeValue_IntValue - // *AttributeValue_BoolValue - Value isAttributeValue_Value `protobuf_oneof:"value"` -} - -func (x *AttributeValue) Reset() { - *x = AttributeValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AttributeValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AttributeValue) ProtoMessage() {} - -func (x *AttributeValue) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AttributeValue.ProtoReflect.Descriptor instead. -func (*AttributeValue) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{1} -} - -func (m *AttributeValue) GetValue() isAttributeValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *AttributeValue) GetStringValue() *TruncatableString { - if x, ok := x.GetValue().(*AttributeValue_StringValue); ok { - return x.StringValue - } - return nil -} - -func (x *AttributeValue) GetIntValue() int64 { - if x, ok := x.GetValue().(*AttributeValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (x *AttributeValue) GetBoolValue() bool { - if x, ok := x.GetValue().(*AttributeValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -type isAttributeValue_Value interface { - isAttributeValue_Value() -} - -type AttributeValue_StringValue struct { - // A string up to 256 bytes long. - StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type AttributeValue_IntValue struct { - // A 64-bit signed integer. - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` -} - -type AttributeValue_BoolValue struct { - // A Boolean value represented by `true` or `false`. - BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -func (*AttributeValue_StringValue) isAttributeValue_Value() {} - -func (*AttributeValue_IntValue) isAttributeValue_Value() {} - -func (*AttributeValue_BoolValue) isAttributeValue_Value() {} - -// A call stack appearing in a trace. -type StackTrace struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Stack frames in this stack trace. A maximum of 128 frames are allowed. - StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` - // The hash ID is used to conserve network bandwidth for duplicate - // stack traces within a single trace. - // - // Often multiple spans will have identical stack traces. - // The first occurrence of a stack trace should contain both the - // `stackFrame` content and a value in `stackTraceHashId`. - // - // Subsequent spans within the same request can refer - // to that stack trace by only setting `stackTraceHashId`. 
- StackTraceHashId int64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` -} - -func (x *StackTrace) Reset() { - *x = StackTrace{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StackTrace) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StackTrace) ProtoMessage() {} - -func (x *StackTrace) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StackTrace.ProtoReflect.Descriptor instead. -func (*StackTrace) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{2} -} - -func (x *StackTrace) GetStackFrames() *StackTrace_StackFrames { - if x != nil { - return x.StackFrames - } - return nil -} - -func (x *StackTrace) GetStackTraceHashId() int64 { - if x != nil { - return x.StackTraceHashId - } - return 0 -} - -// Binary module. -type Module struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // For example: main binary, kernel modules, and dynamic libraries - // such as libc.so, sharedlib.so (up to 256 bytes). - Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` - // A unique identifier for the module, usually a hash of its - // contents (up to 128 bytes). - BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` -} - -func (x *Module) Reset() { - *x = Module{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Module) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Module) ProtoMessage() {} - -func (x *Module) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Module.ProtoReflect.Descriptor instead. -func (*Module) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{3} -} - -func (x *Module) GetModule() *TruncatableString { - if x != nil { - return x.Module - } - return nil -} - -func (x *Module) GetBuildId() *TruncatableString { - if x != nil { - return x.BuildId - } - return nil -} - -// Represents a string that might be shortened to a specified length. -type TruncatableString struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The shortened string. For example, if the original string is 500 - // bytes long and the limit of the string is 128 bytes, then - // `value` contains the first 128 bytes of the 500-byte string. - // - // Truncation always happens on a UTF8 character boundary. 
If there - // are multi-byte characters in the string, then the length of the - // shortened string might be less than the size limit. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // The number of bytes removed from the original string. If this - // value is 0, then the string was not shortened. - TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` -} - -func (x *TruncatableString) Reset() { - *x = TruncatableString{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TruncatableString) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TruncatableString) ProtoMessage() {} - -func (x *TruncatableString) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TruncatableString.ProtoReflect.Descriptor instead. -func (*TruncatableString) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{4} -} - -func (x *TruncatableString) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -func (x *TruncatableString) GetTruncatedByteCount() int32 { - if x != nil { - return x.TruncatedByteCount - } - return 0 -} - -// A set of attributes, each in the format `[KEY]:[VALUE]`. -type Span_Attributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The set of attributes. Each attribute's key can be up to 128 bytes - // long. The value can be a string up to 256 bytes, a signed 64-bit integer, - // or the Boolean values `true` and `false`. For example: - // - // "/instance_id": { "string_value": { "value": "my-instance" } } - // "/http/request_bytes": { "int_value": 300 } - // "abc.com/myattribute": { "bool_value": false } - AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The number of attributes that were discarded. Attributes can be discarded - // because their keys are too long or because there are too many attributes. - // If this value is 0 then all attributes are valid. 
- DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` -} - -func (x *Span_Attributes) Reset() { - *x = Span_Attributes{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_Attributes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_Attributes) ProtoMessage() {} - -func (x *Span_Attributes) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_Attributes.ProtoReflect.Descriptor instead. -func (*Span_Attributes) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { - if x != nil { - return x.AttributeMap - } - return nil -} - -func (x *Span_Attributes) GetDroppedAttributesCount() int32 { - if x != nil { - return x.DroppedAttributesCount - } - return 0 -} - -// A time-stamped annotation or message event in the Span. -type Span_TimeEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The timestamp indicating the time the event occurred. - Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` - // A `TimeEvent` can contain either an `Annotation` object or a - // `MessageEvent` object, but not both. - // - // Types that are assignable to Value: - // *Span_TimeEvent_Annotation_ - // *Span_TimeEvent_MessageEvent_ - Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` -} - -func (x *Span_TimeEvent) Reset() { - *x = Span_TimeEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_TimeEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_TimeEvent) ProtoMessage() {} - -func (x *Span_TimeEvent) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_TimeEvent.ProtoReflect.Descriptor instead. 
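A minimal sketch that builds the Span_Attributes message described above, mirroring the attribute examples from its comment and assuming the same cloudtrace import path as this file's go_package option:

package main

import (
	"fmt"

	cloudtrace "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
)

func main() {
	attrs := &cloudtrace.Span_Attributes{
		AttributeMap: map[string]*cloudtrace.AttributeValue{
			// String values are wrapped in TruncatableString (up to 256 bytes).
			"/instance_id": {Value: &cloudtrace.AttributeValue_StringValue{
				StringValue: &cloudtrace.TruncatableString{Value: "my-instance"},
			}},
			// A 64-bit signed integer value.
			"/http/request_bytes": {Value: &cloudtrace.AttributeValue_IntValue{IntValue: 300}},
			// A Boolean value.
			"abc.com/myattribute": {Value: &cloudtrace.AttributeValue_BoolValue{BoolValue: false}},
		},
	}
	fmt.Println(len(attrs.GetAttributeMap()), attrs.GetDroppedAttributesCount())
}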
-func (*Span_TimeEvent) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *Span_TimeEvent) GetTime() *timestamppb.Timestamp { - if x != nil { - return x.Time - } - return nil -} - -func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { - if x, ok := x.GetValue().(*Span_TimeEvent_Annotation_); ok { - return x.Annotation - } - return nil -} - -func (x *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { - if x, ok := x.GetValue().(*Span_TimeEvent_MessageEvent_); ok { - return x.MessageEvent - } - return nil -} - -type isSpan_TimeEvent_Value interface { - isSpan_TimeEvent_Value() -} - -type Span_TimeEvent_Annotation_ struct { - // Text annotation with a set of attributes. - Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` -} - -type Span_TimeEvent_MessageEvent_ struct { - // An event describing a message sent/received between Spans. - MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` -} - -func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} - -func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} - -// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation -// on the span, consisting of either user-supplied key:value pairs, or -// details of a message sent/received between Spans. -type Span_TimeEvents struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A collection of `TimeEvent`s. - TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` - // The number of dropped annotations in all the included time events. - // If the value is 0, then no annotations were dropped. - DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` - // The number of dropped message events in all the included time events. - // If the value is 0, then no message events were dropped. - DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` -} - -func (x *Span_TimeEvents) Reset() { - *x = Span_TimeEvents{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_TimeEvents) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_TimeEvents) ProtoMessage() {} - -func (x *Span_TimeEvents) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_TimeEvents.ProtoReflect.Descriptor instead. 
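A minimal sketch of a TimeEvents collection holding one annotation, using the oneof wrapper types generated above (import paths assumed from this file's go_package option and the protobuf well-known types):

package main

import (
	"fmt"

	cloudtrace "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	event := &cloudtrace.Span_TimeEvent{
		Time: timestamppb.Now(),
		// The oneof carries either an Annotation or a MessageEvent, never both.
		Value: &cloudtrace.Span_TimeEvent_Annotation_{
			Annotation: &cloudtrace.Span_TimeEvent_Annotation{
				// "cache miss" is a made-up description for illustration.
				Description: &cloudtrace.TruncatableString{Value: "cache miss"},
			},
		},
	}

	events := &cloudtrace.Span_TimeEvents{TimeEvent: []*cloudtrace.Span_TimeEvent{event}}

	// GetAnnotation returns nil when the MessageEvent branch of the oneof is set instead.
	fmt.Println(events.GetTimeEvent()[0].GetAnnotation().GetDescription().GetValue())
}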
-func (*Span_TimeEvents) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { - if x != nil { - return x.TimeEvent - } - return nil -} - -func (x *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { - if x != nil { - return x.DroppedAnnotationsCount - } - return 0 -} - -func (x *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { - if x != nil { - return x.DroppedMessageEventsCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. -type Span_Link struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The [TRACE_ID] for a trace within a project. - TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // The [SPAN_ID] for a span within a trace. - SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The relationship of the current span relative to the linked span. - Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_Link_Type" json:"type,omitempty"` - // A set of attributes on the link. You have have up to 32 attributes per - // link. - Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` -} - -func (x *Span_Link) Reset() { - *x = Span_Link{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_Link) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_Link) ProtoMessage() {} - -func (x *Span_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. -func (*Span_Link) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 3} -} - -func (x *Span_Link) GetTraceId() string { - if x != nil { - return x.TraceId - } - return "" -} - -func (x *Span_Link) GetSpanId() string { - if x != nil { - return x.SpanId - } - return "" -} - -func (x *Span_Link) GetType() Span_Link_Type { - if x != nil { - return x.Type - } - return Span_Link_TYPE_UNSPECIFIED -} - -func (x *Span_Link) GetAttributes() *Span_Attributes { - if x != nil { - return x.Attributes - } - return nil -} - -// A collection of links, which are references from this span to a span -// in the same or different trace. -type Span_Links struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A collection of links. - Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` - // The number of dropped links after the maximum size was enforced. If - // this value is 0, then no links were dropped. 
- DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` -} - -func (x *Span_Links) Reset() { - *x = Span_Links{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_Links) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_Links) ProtoMessage() {} - -func (x *Span_Links) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_Links.ProtoReflect.Descriptor instead. -func (*Span_Links) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 4} -} - -func (x *Span_Links) GetLink() []*Span_Link { - if x != nil { - return x.Link - } - return nil -} - -func (x *Span_Links) GetDroppedLinksCount() int32 { - if x != nil { - return x.DroppedLinksCount - } - return 0 -} - -// Text annotation with a set of attributes. -type Span_TimeEvent_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A user-supplied message describing the event. The maximum length for - // the description is 256 bytes. - Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // A set of attributes on the annotation. You can have up to 4 attributes - // per Annotation. - Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` -} - -func (x *Span_TimeEvent_Annotation) Reset() { - *x = Span_TimeEvent_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_TimeEvent_Annotation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_TimeEvent_Annotation) ProtoMessage() {} - -func (x *Span_TimeEvent_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_TimeEvent_Annotation.ProtoReflect.Descriptor instead. -func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 1, 0} -} - -func (x *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { - if x != nil { - return x.Description - } - return nil -} - -func (x *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { - if x != nil { - return x.Attributes - } - return nil -} - -// An event describing a message sent/received between Spans. -type Span_TimeEvent_MessageEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Type of MessageEvent. Indicates whether the message was sent or - // received. 
- Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` - // An identifier for the MessageEvent's message that can be used to match - // SENT and RECEIVED MessageEvents. It is recommended to be unique within - // a Span. - Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // The number of uncompressed bytes sent or received. - UncompressedSizeBytes int64 `protobuf:"varint,3,opt,name=uncompressed_size_bytes,json=uncompressedSizeBytes,proto3" json:"uncompressed_size_bytes,omitempty"` - // The number of compressed bytes sent or received. If missing assumed to - // be the same size as uncompressed. - CompressedSizeBytes int64 `protobuf:"varint,4,opt,name=compressed_size_bytes,json=compressedSizeBytes,proto3" json:"compressed_size_bytes,omitempty"` -} - -func (x *Span_TimeEvent_MessageEvent) Reset() { - *x = Span_TimeEvent_MessageEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span_TimeEvent_MessageEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} - -func (x *Span_TimeEvent_MessageEvent) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span_TimeEvent_MessageEvent.ProtoReflect.Descriptor instead. -func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 1, 1} -} - -func (x *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { - if x != nil { - return x.Type - } - return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED -} - -func (x *Span_TimeEvent_MessageEvent) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Span_TimeEvent_MessageEvent) GetUncompressedSizeBytes() int64 { - if x != nil { - return x.UncompressedSizeBytes - } - return 0 -} - -func (x *Span_TimeEvent_MessageEvent) GetCompressedSizeBytes() int64 { - if x != nil { - return x.CompressedSizeBytes - } - return 0 -} - -// Represents a single stack frame in a stack trace. -type StackTrace_StackFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fully-qualified name that uniquely identifies the function or - // method that is active in this frame (up to 1024 bytes). - FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` - // An un-mangled function name, if `function_name` is - // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can - // be fully-qualified (up to 1024 bytes). - OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` - // The name of the source file where the function call appears (up to 256 - // bytes). 
- FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` - // The line number in `file_name` where the function call appears. - LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` - // The column number where the function call appears, if available. - // This is important in JavaScript because of its anonymous functions. - ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` - // The binary module from where the code was loaded. - LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` - // The version of the deployed source code (up to 128 bytes). - SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` -} - -func (x *StackTrace_StackFrame) Reset() { - *x = StackTrace_StackFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StackTrace_StackFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StackTrace_StackFrame) ProtoMessage() {} - -func (x *StackTrace_StackFrame) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StackTrace_StackFrame.ProtoReflect.Descriptor instead. -func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *StackTrace_StackFrame) GetFunctionName() *TruncatableString { - if x != nil { - return x.FunctionName - } - return nil -} - -func (x *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { - if x != nil { - return x.OriginalFunctionName - } - return nil -} - -func (x *StackTrace_StackFrame) GetFileName() *TruncatableString { - if x != nil { - return x.FileName - } - return nil -} - -func (x *StackTrace_StackFrame) GetLineNumber() int64 { - if x != nil { - return x.LineNumber - } - return 0 -} - -func (x *StackTrace_StackFrame) GetColumnNumber() int64 { - if x != nil { - return x.ColumnNumber - } - return 0 -} - -func (x *StackTrace_StackFrame) GetLoadModule() *Module { - if x != nil { - return x.LoadModule - } - return nil -} - -func (x *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { - if x != nil { - return x.SourceVersion - } - return nil -} - -// A collection of stack frames, which can be truncated. -type StackTrace_StackFrames struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Stack frames in this call stack. - Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` - // The number of stack frames that were dropped because there - // were too many stack frames. - // If this value is 0, then no stack frames were dropped. 
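A minimal sketch of a StackTrace with a single frame; the function name, file name, line number, and hash id are hypothetical values chosen for illustration (import path assumed from this file's go_package option):

package main

import (
	"fmt"

	cloudtrace "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
)

func main() {
	trace := &cloudtrace.StackTrace{
		StackFrames: &cloudtrace.StackTrace_StackFrames{
			Frame: []*cloudtrace.StackTrace_StackFrame{{
				FunctionName: &cloudtrace.TruncatableString{Value: "main.handleRequest"}, // hypothetical
				FileName:     &cloudtrace.TruncatableString{Value: "handler.go"},         // hypothetical
				LineNumber:   42,
			}},
		},
		// Later spans in the same request could carry only this hash id instead of
		// repeating identical frames, as described in the StackTrace comment above.
		StackTraceHashId: 12345, // hypothetical
	}
	fmt.Println(trace.GetStackFrames().GetFrame()[0].GetFunctionName().GetValue())
}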
- DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` -} - -func (x *StackTrace_StackFrames) Reset() { - *x = StackTrace_StackFrames{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StackTrace_StackFrames) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StackTrace_StackFrames) ProtoMessage() {} - -func (x *StackTrace_StackFrames) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StackTrace_StackFrames.ProtoReflect.Descriptor instead. -func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{2, 1} -} - -func (x *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { - if x != nil { - return x.Frame - } - return nil -} - -func (x *StackTrace_StackFrames) GetDroppedFramesCount() int32 { - if x != nil { - return x.DroppedFramesCount - } - return 0 -} - -var File_google_devtools_cloudtrace_v2_trace_proto protoreflect.FileDescriptor - -var file_google_devtools_cloudtrace_v2_trace_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x32, 0x2f, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, - 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb0, - 0x15, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1c, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x24, - 0x0a, 0x0e, 0x70, 0x61, 0x72, 
0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, - 0x61, 0x6e, 0x49, 0x64, 0x12, 0x58, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, - 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3e, - 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, - 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, - 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x70, 0x61, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0b, 0x73, 0x74, - 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x63, - 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, - 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, 0x0a, 0x1b, 0x73, 0x61, 0x6d, - 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x17, 0x73, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x41, 0x73, 0x50, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x4a, 0x0a, 0x10, 0x63, 0x68, 0x69, 0x6c, - 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, - 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, - 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, - 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x9d, 0x02, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x12, 0x65, 0x0a, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x70, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, - 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x64, 0x72, - 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x6e, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x43, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xce, 0x05, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, - 0x6d, 0x65, 0x12, 0x5a, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x61, - 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, - 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x1a, 0xb0, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x52, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, - 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x1a, 0x95, 0x02, 0x0a, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, - 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x75, 0x6e, - 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x75, 0x6e, 0x63, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, - 0x65, 
0x73, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, - 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x52, 0x45, 0x43, 0x45, 0x49, 0x56, 0x45, 0x44, 0x10, 0x02, 0x42, 0x07, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xd7, 0x01, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x4c, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x19, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3f, - 0x0a, 0x1c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x19, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, - 0x9a, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, - 0x4c, 0x69, 0x6e, 0x6b, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x4e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, - 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, - 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, - 0x11, 0x43, 0x48, 0x49, 0x4c, 
0x44, 0x5f, 0x4c, 0x49, 0x4e, 0x4b, 0x45, 0x44, 0x5f, 0x53, 0x50, - 0x41, 0x4e, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x5f, 0x4c, - 0x49, 0x4e, 0x4b, 0x45, 0x44, 0x5f, 0x53, 0x50, 0x41, 0x4e, 0x10, 0x02, 0x1a, 0x75, 0x0a, 0x05, - 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x3c, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, - 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x04, 0x6c, - 0x69, 0x6e, 0x6b, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, - 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x67, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, - 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, - 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, - 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0c, - 0x0a, 0x08, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x3a, 0x53, 0xea, 0x41, - 0x50, 0x0a, 0x1e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x70, 0x61, - 0x6e, 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x70, 0x61, 0x6e, - 0x7d, 0x22, 0xb0, 0x01, 0x0a, 0x0e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x55, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, - 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, - 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, - 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, - 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0xa7, 0x06, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, - 0x61, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x66, 0x72, 0x61, - 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 
0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, - 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, - 0x52, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, - 0x13, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x74, 0x61, 0x63, - 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x48, 0x61, 0x73, 0x68, 0x49, 0x64, 0x1a, 0x81, 0x04, 0x0a, - 0x0a, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, - 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x66, 0x0a, 0x16, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, - 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x46, 0x75, - 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, - 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, - 0x08, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x6e, - 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, - 0x6c, 0x69, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0c, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, - 0x46, 0x0a, 0x0b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, - 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x0a, 0x6c, 0x6f, 0x61, - 0x64, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 
0x76, 0x32, 0x2e, - 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x1a, 0x8b, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, - 0x12, 0x4a, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x63, 0x6b, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, - 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, - 0x70, 0x65, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9f, - 0x01, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x48, 0x0a, 0x06, 0x6d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x6d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, - 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x22, 0x5b, 0x0a, 0x11, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, - 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x74, 0x72, 0x75, 0x6e, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0xc5, 0x01, - 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, - 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x47, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x32, 0x3b, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0xaa, 0x02, 0x15, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x56, 
0x32, 0xca, 0x02, 0x15, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x18, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_devtools_cloudtrace_v2_trace_proto_rawDescOnce sync.Once - file_google_devtools_cloudtrace_v2_trace_proto_rawDescData = file_google_devtools_cloudtrace_v2_trace_proto_rawDesc -) - -func file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP() []byte { - file_google_devtools_cloudtrace_v2_trace_proto_rawDescOnce.Do(func() { - file_google_devtools_cloudtrace_v2_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_devtools_cloudtrace_v2_trace_proto_rawDescData) - }) - return file_google_devtools_cloudtrace_v2_trace_proto_rawDescData -} - -var file_google_devtools_cloudtrace_v2_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_google_devtools_cloudtrace_v2_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_google_devtools_cloudtrace_v2_trace_proto_goTypes = []interface{}{ - (Span_SpanKind)(0), // 0: google.devtools.cloudtrace.v2.Span.SpanKind - (Span_TimeEvent_MessageEvent_Type)(0), // 1: google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.Type - (Span_Link_Type)(0), // 2: google.devtools.cloudtrace.v2.Span.Link.Type - (*Span)(nil), // 3: google.devtools.cloudtrace.v2.Span - (*AttributeValue)(nil), // 4: google.devtools.cloudtrace.v2.AttributeValue - (*StackTrace)(nil), // 5: google.devtools.cloudtrace.v2.StackTrace - (*Module)(nil), // 6: google.devtools.cloudtrace.v2.Module - (*TruncatableString)(nil), // 7: google.devtools.cloudtrace.v2.TruncatableString - (*Span_Attributes)(nil), // 8: google.devtools.cloudtrace.v2.Span.Attributes - (*Span_TimeEvent)(nil), // 9: google.devtools.cloudtrace.v2.Span.TimeEvent - (*Span_TimeEvents)(nil), // 10: google.devtools.cloudtrace.v2.Span.TimeEvents - (*Span_Link)(nil), // 11: google.devtools.cloudtrace.v2.Span.Link - (*Span_Links)(nil), // 12: google.devtools.cloudtrace.v2.Span.Links - nil, // 13: google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry - (*Span_TimeEvent_Annotation)(nil), // 14: google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation - (*Span_TimeEvent_MessageEvent)(nil), // 15: google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent - (*StackTrace_StackFrame)(nil), // 16: google.devtools.cloudtrace.v2.StackTrace.StackFrame - (*StackTrace_StackFrames)(nil), // 17: google.devtools.cloudtrace.v2.StackTrace.StackFrames - (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp - (*status.Status)(nil), // 19: google.rpc.Status - (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue - (*wrapperspb.Int32Value)(nil), // 21: google.protobuf.Int32Value -} -var file_google_devtools_cloudtrace_v2_trace_proto_depIdxs = []int32{ - 7, // 0: google.devtools.cloudtrace.v2.Span.display_name:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 18, // 1: google.devtools.cloudtrace.v2.Span.start_time:type_name -> google.protobuf.Timestamp - 18, // 2: google.devtools.cloudtrace.v2.Span.end_time:type_name -> google.protobuf.Timestamp - 8, // 3: google.devtools.cloudtrace.v2.Span.attributes:type_name -> google.devtools.cloudtrace.v2.Span.Attributes - 5, // 4: google.devtools.cloudtrace.v2.Span.stack_trace:type_name -> google.devtools.cloudtrace.v2.StackTrace - 10, // 5: 
google.devtools.cloudtrace.v2.Span.time_events:type_name -> google.devtools.cloudtrace.v2.Span.TimeEvents - 12, // 6: google.devtools.cloudtrace.v2.Span.links:type_name -> google.devtools.cloudtrace.v2.Span.Links - 19, // 7: google.devtools.cloudtrace.v2.Span.status:type_name -> google.rpc.Status - 20, // 8: google.devtools.cloudtrace.v2.Span.same_process_as_parent_span:type_name -> google.protobuf.BoolValue - 21, // 9: google.devtools.cloudtrace.v2.Span.child_span_count:type_name -> google.protobuf.Int32Value - 0, // 10: google.devtools.cloudtrace.v2.Span.span_kind:type_name -> google.devtools.cloudtrace.v2.Span.SpanKind - 7, // 11: google.devtools.cloudtrace.v2.AttributeValue.string_value:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 17, // 12: google.devtools.cloudtrace.v2.StackTrace.stack_frames:type_name -> google.devtools.cloudtrace.v2.StackTrace.StackFrames - 7, // 13: google.devtools.cloudtrace.v2.Module.module:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 7, // 14: google.devtools.cloudtrace.v2.Module.build_id:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 13, // 15: google.devtools.cloudtrace.v2.Span.Attributes.attribute_map:type_name -> google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry - 18, // 16: google.devtools.cloudtrace.v2.Span.TimeEvent.time:type_name -> google.protobuf.Timestamp - 14, // 17: google.devtools.cloudtrace.v2.Span.TimeEvent.annotation:type_name -> google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation - 15, // 18: google.devtools.cloudtrace.v2.Span.TimeEvent.message_event:type_name -> google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent - 9, // 19: google.devtools.cloudtrace.v2.Span.TimeEvents.time_event:type_name -> google.devtools.cloudtrace.v2.Span.TimeEvent - 2, // 20: google.devtools.cloudtrace.v2.Span.Link.type:type_name -> google.devtools.cloudtrace.v2.Span.Link.Type - 8, // 21: google.devtools.cloudtrace.v2.Span.Link.attributes:type_name -> google.devtools.cloudtrace.v2.Span.Attributes - 11, // 22: google.devtools.cloudtrace.v2.Span.Links.link:type_name -> google.devtools.cloudtrace.v2.Span.Link - 4, // 23: google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry.value:type_name -> google.devtools.cloudtrace.v2.AttributeValue - 7, // 24: google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation.description:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 8, // 25: google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation.attributes:type_name -> google.devtools.cloudtrace.v2.Span.Attributes - 1, // 26: google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.type:type_name -> google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.Type - 7, // 27: google.devtools.cloudtrace.v2.StackTrace.StackFrame.function_name:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 7, // 28: google.devtools.cloudtrace.v2.StackTrace.StackFrame.original_function_name:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 7, // 29: google.devtools.cloudtrace.v2.StackTrace.StackFrame.file_name:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 6, // 30: google.devtools.cloudtrace.v2.StackTrace.StackFrame.load_module:type_name -> google.devtools.cloudtrace.v2.Module - 7, // 31: google.devtools.cloudtrace.v2.StackTrace.StackFrame.source_version:type_name -> google.devtools.cloudtrace.v2.TruncatableString - 16, // 32: google.devtools.cloudtrace.v2.StackTrace.StackFrames.frame:type_name -> google.devtools.cloudtrace.v2.StackTrace.StackFrame - 33, // [33:33] is 
the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name -} - -func init() { file_google_devtools_cloudtrace_v2_trace_proto_init() } -func file_google_devtools_cloudtrace_v2_trace_proto_init() { - if File_google_devtools_cloudtrace_v2_trace_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributeValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StackTrace); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TruncatableString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_Attributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_TimeEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_TimeEvents); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_Link); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_Links); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_TimeEvent_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return 
nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_TimeEvent_MessageEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StackTrace_StackFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StackTrace_StackFrames); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*AttributeValue_StringValue)(nil), - (*AttributeValue_IntValue)(nil), - (*AttributeValue_BoolValue)(nil), - } - file_google_devtools_cloudtrace_v2_trace_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*Span_TimeEvent_Annotation_)(nil), - (*Span_TimeEvent_MessageEvent_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_devtools_cloudtrace_v2_trace_proto_rawDesc, - NumEnums: 3, - NumMessages: 15, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_devtools_cloudtrace_v2_trace_proto_goTypes, - DependencyIndexes: file_google_devtools_cloudtrace_v2_trace_proto_depIdxs, - EnumInfos: file_google_devtools_cloudtrace_v2_trace_proto_enumTypes, - MessageInfos: file_google_devtools_cloudtrace_v2_trace_proto_msgTypes, - }.Build() - File_google_devtools_cloudtrace_v2_trace_proto = out.File - file_google_devtools_cloudtrace_v2_trace_proto_rawDesc = nil - file_google_devtools_cloudtrace_v2_trace_proto_goTypes = nil - file_google_devtools_cloudtrace_v2_trace_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go deleted file mode 100644 index 0bce8eb60c..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/devtools/cloudtrace/v2/tracing.proto - -package cloudtrace - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - _ "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The request message for the `BatchWriteSpans` method. -type BatchWriteSpansRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the project where the spans belong. The format is - // `projects/[PROJECT_ID]`. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. A list of new spans. The span names must not match existing - // spans, or the results are undefined. - Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` -} - -func (x *BatchWriteSpansRequest) Reset() { - *x = BatchWriteSpansRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_devtools_cloudtrace_v2_tracing_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BatchWriteSpansRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BatchWriteSpansRequest) ProtoMessage() {} - -func (x *BatchWriteSpansRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_devtools_cloudtrace_v2_tracing_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BatchWriteSpansRequest.ProtoReflect.Descriptor instead. 
-func (*BatchWriteSpansRequest) Descriptor() ([]byte, []int) { - return file_google_devtools_cloudtrace_v2_tracing_proto_rawDescGZIP(), []int{0} -} - -func (x *BatchWriteSpansRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *BatchWriteSpansRequest) GetSpans() []*Span { - if x != nil { - return x.Spans - } - return nil -} - -var File_google_devtools_cloudtrace_v2_tracing_proto protoreflect.FileDescriptor - -var file_google_devtools_cloudtrace_v2_tracing_proto_rawDesc = []byte{ - 0x0a, 0x2b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x32, 0x2f, - 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1c, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x01, 0x0a, 0x16, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, - 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x32, 0xba, 0x03, 0x0a, - 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa1, 0x01, - 0x0a, 0x0f, 0x42, 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x70, 0x61, 
0x6e, - 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, - 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x70, 0x61, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x3f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x22, 0x27, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x3a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x73, 0x70, 0x61, 0x6e, - 0x73, 0x12, 0x89, 0x01, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, - 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x1a, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, - 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x2b, 0x22, 0x26, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x1a, 0x7a, 0xca, - 0x41, 0x19, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x5b, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x42, 0xc7, 0x01, 0x0a, 0x21, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, - 0x73, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x42, - 0x0c, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x47, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x64, 0x65, 0x76, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x74, 0x72, 0x61, 0x63, 0x65, 0xaa, 0x02, 0x15, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x32, - 0xca, 0x02, 0x15, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x54, 0x72, 0x61, 
0x63, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x3a, - 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_devtools_cloudtrace_v2_tracing_proto_rawDescOnce sync.Once - file_google_devtools_cloudtrace_v2_tracing_proto_rawDescData = file_google_devtools_cloudtrace_v2_tracing_proto_rawDesc -) - -func file_google_devtools_cloudtrace_v2_tracing_proto_rawDescGZIP() []byte { - file_google_devtools_cloudtrace_v2_tracing_proto_rawDescOnce.Do(func() { - file_google_devtools_cloudtrace_v2_tracing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_devtools_cloudtrace_v2_tracing_proto_rawDescData) - }) - return file_google_devtools_cloudtrace_v2_tracing_proto_rawDescData -} - -var file_google_devtools_cloudtrace_v2_tracing_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_devtools_cloudtrace_v2_tracing_proto_goTypes = []interface{}{ - (*BatchWriteSpansRequest)(nil), // 0: google.devtools.cloudtrace.v2.BatchWriteSpansRequest - (*Span)(nil), // 1: google.devtools.cloudtrace.v2.Span - (*emptypb.Empty)(nil), // 2: google.protobuf.Empty -} -var file_google_devtools_cloudtrace_v2_tracing_proto_depIdxs = []int32{ - 1, // 0: google.devtools.cloudtrace.v2.BatchWriteSpansRequest.spans:type_name -> google.devtools.cloudtrace.v2.Span - 0, // 1: google.devtools.cloudtrace.v2.TraceService.BatchWriteSpans:input_type -> google.devtools.cloudtrace.v2.BatchWriteSpansRequest - 1, // 2: google.devtools.cloudtrace.v2.TraceService.CreateSpan:input_type -> google.devtools.cloudtrace.v2.Span - 2, // 3: google.devtools.cloudtrace.v2.TraceService.BatchWriteSpans:output_type -> google.protobuf.Empty - 1, // 4: google.devtools.cloudtrace.v2.TraceService.CreateSpan:output_type -> google.devtools.cloudtrace.v2.Span - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_devtools_cloudtrace_v2_tracing_proto_init() } -func file_google_devtools_cloudtrace_v2_tracing_proto_init() { - if File_google_devtools_cloudtrace_v2_tracing_proto != nil { - return - } - file_google_devtools_cloudtrace_v2_trace_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_devtools_cloudtrace_v2_tracing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BatchWriteSpansRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_devtools_cloudtrace_v2_tracing_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_devtools_cloudtrace_v2_tracing_proto_goTypes, - DependencyIndexes: file_google_devtools_cloudtrace_v2_tracing_proto_depIdxs, - MessageInfos: file_google_devtools_cloudtrace_v2_tracing_proto_msgTypes, - }.Build() - File_google_devtools_cloudtrace_v2_tracing_proto = out.File - file_google_devtools_cloudtrace_v2_tracing_proto_rawDesc = nil - file_google_devtools_cloudtrace_v2_tracing_proto_goTypes = nil - file_google_devtools_cloudtrace_v2_tracing_proto_depIdxs = nil -} - -// Reference imports to 
suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - // Sends new spans to new or existing traces. You cannot update - // existing spans. - BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Creates a new span. - CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) -} - -type traceServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *traceServiceClient) CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) { - out := new(Span) - err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - // Sends new spans to new or existing traces. You cannot update - // existing spans. - BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*emptypb.Empty, error) - // Creates a new span. - CreateSpan(context.Context, *Span) (*Span, error) -} - -// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedTraceServiceServer struct { -} - -func (*UnimplementedTraceServiceServer) BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method BatchWriteSpans not implemented") -} -func (*UnimplementedTraceServiceServer) CreateSpan(context.Context, *Span) (*Span, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateSpan not implemented") -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_BatchWriteSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BatchWriteSpansRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TraceServiceServer).BatchWriteSpans(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TraceServiceServer).BatchWriteSpans(ctx, req.(*BatchWriteSpansRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TraceService_CreateSpan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Span) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TraceServiceServer).CreateSpan(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TraceServiceServer).CreateSpan(ctx, req.(*Span)) - } - return interceptor(ctx, in, info, handler) -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.devtools.cloudtrace.v2.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "BatchWriteSpans", - Handler: _TraceService_BatchWriteSpans_Handler, - }, - { - MethodName: "CreateSpan", - Handler: _TraceService_CreateSpan_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/devtools/cloudtrace/v2/tracing.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go deleted file mode 100644 index 87315aaae0..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go +++ /dev/null @@ -1,1215 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/alert.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - status "google.golang.org/genproto/googleapis/rpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Operators for combining conditions. -type AlertPolicy_ConditionCombinerType int32 - -const ( - // An unspecified combiner. - AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 - // Combine conditions using the logical `AND` operator. An - // incident is created only if all the conditions are met - // simultaneously. This combiner is satisfied if all conditions are - // met, even if they are met on completely different resources. - AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 - // Combine conditions using the logical `OR` operator. An incident - // is created if any of the listed conditions is met. - AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 - // Combine conditions using logical `AND` operator, but unlike the regular - // `AND` option, an incident is created only if all conditions are met - // simultaneously on at least one resource. - AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 -) - -// Enum value maps for AlertPolicy_ConditionCombinerType. -var ( - AlertPolicy_ConditionCombinerType_name = map[int32]string{ - 0: "COMBINE_UNSPECIFIED", - 1: "AND", - 2: "OR", - 3: "AND_WITH_MATCHING_RESOURCE", - } - AlertPolicy_ConditionCombinerType_value = map[string]int32{ - "COMBINE_UNSPECIFIED": 0, - "AND": 1, - "OR": 2, - "AND_WITH_MATCHING_RESOURCE": 3, - } -) - -func (x AlertPolicy_ConditionCombinerType) Enum() *AlertPolicy_ConditionCombinerType { - p := new(AlertPolicy_ConditionCombinerType) - *p = x - return p -} - -func (x AlertPolicy_ConditionCombinerType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AlertPolicy_ConditionCombinerType) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_alert_proto_enumTypes[0].Descriptor() -} - -func (AlertPolicy_ConditionCombinerType) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_alert_proto_enumTypes[0] -} - -func (x AlertPolicy_ConditionCombinerType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AlertPolicy_ConditionCombinerType.Descriptor instead. -func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} -} - -// A description of the conditions under which some aspect of your system is -// considered to be "unhealthy" and the ways to notify people or services about -// this state. 
For an overview of alert policies, see -// [Introduction to Alerting](https://cloud.google.com/monitoring/alerts/). -type AlertPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required if the policy exists. The resource name for this policy. The - // format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - // - // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy - // is created. When calling the - // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] - // method, do not include the `name` field in the alerting policy passed as - // part of the request. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A short name or phrase used to identify the policy in dashboards, - // notifications, and incidents. To avoid confusion, don't use the same - // display name for multiple policies in the same project. The name is - // limited to 512 Unicode characters. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // Documentation that is included with notifications and incidents related to - // this policy. Best practice is for the documentation to include information - // to help responders understand, mitigate, escalate, and correct the - // underlying problems detected by the alerting policy. Notification channels - // that have limited capacity might not show this documentation. - Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"` - // User-supplied key/value data to be used for organizing and - // identifying the `AlertPolicy` objects. - // - // The field can contain up to 64 entries. Each key and value is limited to - // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and - // values can contain only lowercase letters, numerals, underscores, and - // dashes. Keys must begin with a letter. - UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // A list of conditions for the policy. The conditions are combined by AND or - // OR according to the `combiner` field. If the combined conditions evaluate - // to true, then an incident is created. A policy can have from one to six - // conditions. - // If `condition_time_series_query_language` is present, it must be the only - // `condition`. - Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"` - // How to combine the results of multiple conditions to determine if an - // incident should be opened. - // If `condition_time_series_query_language` is present, this must be - // `COMBINE_UNSPECIFIED`. - Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` - // Whether or not the policy is enabled. On write, the default interpretation - // if unset is that the policy is enabled. On read, clients should not make - // any assumption about the state if it has not been populated. The - // field should always be populated on List and Get operations, unless - // a field projection has been specified that strips it out. 
- Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` - // Read-only description of how the alert policy is invalid. OK if the alert - // policy is valid. If not OK, the alert policy will not generate incidents. - Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"` - // Identifies the notification channels to which notifications should be sent - // when incidents are opened or closed or when new violations occur on - // an already opened incident. Each element of this array corresponds to - // the `name` field in each of the - // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] - // objects that are returned from the [`ListNotificationChannels`] - // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] - // method. The format of the entries in this field is: - // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` - // A read-only record of the creation of the alerting policy. If provided - // in a call to create or update, this field will be ignored. - CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` - // A read-only record of the most recent change to the alerting policy. If - // provided in a call to create or update, this field will be ignored. - MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"` -} - -func (x *AlertPolicy) Reset() { - *x = AlertPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlertPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlertPolicy) ProtoMessage() {} - -func (x *AlertPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlertPolicy.ProtoReflect.Descriptor instead. 
-func (*AlertPolicy) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0} -} - -func (x *AlertPolicy) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *AlertPolicy) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation { - if x != nil { - return x.Documentation - } - return nil -} - -func (x *AlertPolicy) GetUserLabels() map[string]string { - if x != nil { - return x.UserLabels - } - return nil -} - -func (x *AlertPolicy) GetConditions() []*AlertPolicy_Condition { - if x != nil { - return x.Conditions - } - return nil -} - -func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { - if x != nil { - return x.Combiner - } - return AlertPolicy_COMBINE_UNSPECIFIED -} - -func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue { - if x != nil { - return x.Enabled - } - return nil -} - -func (x *AlertPolicy) GetValidity() *status.Status { - if x != nil { - return x.Validity - } - return nil -} - -func (x *AlertPolicy) GetNotificationChannels() []string { - if x != nil { - return x.NotificationChannels - } - return nil -} - -func (x *AlertPolicy) GetCreationRecord() *MutationRecord { - if x != nil { - return x.CreationRecord - } - return nil -} - -func (x *AlertPolicy) GetMutationRecord() *MutationRecord { - if x != nil { - return x.MutationRecord - } - return nil -} - -// A content string and a MIME type that describes the content string's -// format. -type AlertPolicy_Documentation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The text of the documentation, interpreted according to `mime_type`. - // The content may not exceed 8,192 Unicode characters and may not exceed - // more than 10,240 bytes when encoded in UTF-8 format, whichever is - // smaller. - Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` - // The format of the `content` field. Presently, only the value - // `"text/markdown"` is supported. See - // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information. - MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` -} - -func (x *AlertPolicy_Documentation) Reset() { - *x = AlertPolicy_Documentation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlertPolicy_Documentation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlertPolicy_Documentation) ProtoMessage() {} - -func (x *AlertPolicy_Documentation) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlertPolicy_Documentation.ProtoReflect.Descriptor instead. 
-func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *AlertPolicy_Documentation) GetContent() string { - if x != nil { - return x.Content - } - return "" -} - -func (x *AlertPolicy_Documentation) GetMimeType() string { - if x != nil { - return x.MimeType - } - return "" -} - -// A condition is a true/false test that determines when an alerting policy -// should open an incident. If a condition evaluates to true, it signifies -// that something is wrong. -type AlertPolicy_Condition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required if the condition exists. The unique resource name for this - // condition. Its format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] - // - // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the - // condition is created as part of a new or updated alerting policy. - // - // When calling the - // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] - // method, do not include the `name` field in the conditions of the - // requested alerting policy. Stackdriver Monitoring creates the - // condition identifiers and includes them in the new policy. - // - // When calling the - // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] - // method to update a policy, including a condition `name` causes the - // existing condition to be updated. Conditions without names are added to - // the updated policy. Existing conditions are deleted if they are not - // updated. - // - // Best practice is to preserve `[CONDITION_ID]` if you make only small - // changes, such as those to condition thresholds, durations, or trigger - // values. Otherwise, treat the change as a new condition and let the - // existing condition be deleted. - Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` - // A short name or phrase used to identify the condition in dashboards, - // notifications, and incidents. To avoid confusion, don't use the same - // display name for multiple conditions in the same policy. - DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // Only one of the following condition types will be specified. - // - // Types that are assignable to Condition: - // *AlertPolicy_Condition_ConditionThreshold - // *AlertPolicy_Condition_ConditionAbsent - Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` -} - -func (x *AlertPolicy_Condition) Reset() { - *x = AlertPolicy_Condition{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlertPolicy_Condition) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlertPolicy_Condition) ProtoMessage() {} - -func (x *AlertPolicy_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlertPolicy_Condition.ProtoReflect.Descriptor instead. 
-func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *AlertPolicy_Condition) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *AlertPolicy_Condition) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { - if m != nil { - return m.Condition - } - return nil -} - -func (x *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { - if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { - return x.ConditionThreshold - } - return nil -} - -func (x *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { - if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { - return x.ConditionAbsent - } - return nil -} - -type isAlertPolicy_Condition_Condition interface { - isAlertPolicy_Condition_Condition() -} - -type AlertPolicy_Condition_ConditionThreshold struct { - // A condition that compares a time series against a threshold. - ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"` -} - -type AlertPolicy_Condition_ConditionAbsent struct { - // A condition that checks that a time series continues to - // receive new data points. - ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"` -} - -func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} - -func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} - -// Specifies how many time series must fail a predicate to trigger a -// condition. If not specified, then a `{count: 1}` trigger is used. -type AlertPolicy_Condition_Trigger struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A type of trigger. - // - // Types that are assignable to Type: - // *AlertPolicy_Condition_Trigger_Count - // *AlertPolicy_Condition_Trigger_Percent - Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` -} - -func (x *AlertPolicy_Condition_Trigger) Reset() { - *x = AlertPolicy_Condition_Trigger{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlertPolicy_Condition_Trigger) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} - -func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlertPolicy_Condition_Trigger.ProtoReflect.Descriptor instead. 
-func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} -} - -func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { - if m != nil { - return m.Type - } - return nil -} - -func (x *AlertPolicy_Condition_Trigger) GetCount() int32 { - if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { - return x.Count - } - return 0 -} - -func (x *AlertPolicy_Condition_Trigger) GetPercent() float64 { - if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { - return x.Percent - } - return 0 -} - -type isAlertPolicy_Condition_Trigger_Type interface { - isAlertPolicy_Condition_Trigger_Type() -} - -type AlertPolicy_Condition_Trigger_Count struct { - // The absolute number of time series that must fail - // the predicate for the condition to be triggered. - Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` -} - -type AlertPolicy_Condition_Trigger_Percent struct { - // The percentage of time series that must fail the - // predicate for the condition to be triggered. - Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` -} - -func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} - -func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} - -// A condition type that compares a collection of time series -// against a threshold. -type AlertPolicy_Condition_MetricThreshold struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that - // identifies which time series should be compared with the threshold. - // - // The filter is similar to the one that is specified in the - // [`ListTimeSeries` - // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) - // (that call is useful to verify the time series that will be retrieved / - // processed) and must specify the metric type and optionally may contain - // restrictions on resource type, resource labels, and metric labels. - // This field may not exceed 2048 Unicode characters in length. - Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // Specifies the alignment of data points in individual time series as - // well as how to combine the retrieved time series together (such as - // when aggregating multiple streams on each resource to a single - // stream for each resource or when aggregating streams across all - // members of a group of resrouces). Multiple aggregations - // are applied in the order specified. - // - // This field is similar to the one in the [`ListTimeSeries` - // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). - // It is advisable to use the `ListTimeSeries` method when debugging this - // field. - Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` - // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that - // identifies a time series that should be used as the denominator of a - // ratio that will be compared with the threshold. If a - // `denominator_filter` is specified, the time series specified by the - // `filter` field will be used as the numerator. 
- // - // The filter must specify the metric type and optionally may contain - // restrictions on resource type, resource labels, and metric labels. - // This field may not exceed 2048 Unicode characters in length. - DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` - // Specifies the alignment of data points in individual time series - // selected by `denominatorFilter` as - // well as how to combine the retrieved time series together (such as - // when aggregating multiple streams on each resource to a single - // stream for each resource or when aggregating streams across all - // members of a group of resources). - // - // When computing ratios, the `aggregations` and - // `denominator_aggregations` fields must use the same alignment period - // and produce time series that have the same periodicity and labels. - DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` - // The comparison to apply between the time series (indicated by `filter` - // and `aggregation`) and the threshold (indicated by `threshold_value`). - // The comparison is applied on each time series, with the time series - // on the left-hand side and the threshold on the right-hand side. - // - // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. - Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` - // A value against which to compare the time series. - ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` - // The amount of time that a time series must violate the - // threshold to be considered failing. Currently, only values - // that are a multiple of a minute--e.g., 0, 60, 120, or 300 - // seconds--are supported. If an invalid value is given, an - // error will be returned. When choosing a duration, it is useful to - // keep in mind the frequency of the underlying time series data - // (which may also be affected by any alignments specified in the - // `aggregations` field); a good duration is long enough so that a single - // outlier does not generate spurious alerts, but short enough that - // unhealthy states are detected and alerted on quickly. - Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` - // The number/percent of time series for which the comparison must hold - // in order for the condition to trigger. If unspecified, then the - // condition will trigger if the comparison is true for any of the - // time series that have been identified by `filter` and `aggregations`, - // or by the ratio, if `denominator_filter` and `denominator_aggregations` - // are specified. 
- Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` -} - -func (x *AlertPolicy_Condition_MetricThreshold) Reset() { - *x = AlertPolicy_Condition_MetricThreshold{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlertPolicy_Condition_MetricThreshold) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} - -func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlertPolicy_Condition_MetricThreshold.ProtoReflect.Descriptor instead. -func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1} -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { - if x != nil { - return x.Aggregations - } - return nil -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { - if x != nil { - return x.DenominatorFilter - } - return "" -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { - if x != nil { - return x.DenominatorAggregations - } - return nil -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { - if x != nil { - return x.Comparison - } - return ComparisonType_COMPARISON_UNSPECIFIED -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { - if x != nil { - return x.ThresholdValue - } - return 0 -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration { - if x != nil { - return x.Duration - } - return nil -} - -func (x *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { - if x != nil { - return x.Trigger - } - return nil -} - -// A condition type that checks that monitored resources -// are reporting data. The configuration defines a metric and -// a set of monitored resources. The predicate is considered in violation -// when a time series for the specified metric of a monitored -// resource does not include any data in the specified `duration`. -type AlertPolicy_Condition_MetricAbsence struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that - // identifies which time series should be compared with the threshold. - // - // The filter is similar to the one that is specified in the - // [`ListTimeSeries` - // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) - // (that call is useful to verify the time series that will be retrieved / - // processed) and must specify the metric type and optionally may contain - // restrictions on resource type, resource labels, and metric labels. - // This field may not exceed 2048 Unicode characters in length. 
- Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - // Specifies the alignment of data points in individual time series as - // well as how to combine the retrieved time series together (such as - // when aggregating multiple streams on each resource to a single - // stream for each resource or when aggregating streams across all - // members of a group of resrouces). Multiple aggregations - // are applied in the order specified. - // - // This field is similar to the one in the [`ListTimeSeries` - // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). - // It is advisable to use the `ListTimeSeries` method when debugging this - // field. - Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"` - // The amount of time that a time series must fail to report new - // data to be considered failing. Currently, only values that - // are a multiple of a minute--e.g. 60, 120, or 300 - // seconds--are supported. If an invalid value is given, an - // error will be returned. The `Duration.nanos` field is - // ignored. - Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` - // The number/percent of time series for which the comparison must hold - // in order for the condition to trigger. If unspecified, then the - // condition will trigger if the comparison is true for any of the - // time series that have been identified by `filter` and `aggregations`. - Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` -} - -func (x *AlertPolicy_Condition_MetricAbsence) Reset() { - *x = AlertPolicy_Condition_MetricAbsence{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlertPolicy_Condition_MetricAbsence) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} - -func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlertPolicy_Condition_MetricAbsence.ProtoReflect.Descriptor instead. 
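
As a worked example of composing the nested types above, here is a minimal sketch that builds a threshold condition which fires when any matching time series stays above a value for five minutes. The metric filter and helper name are placeholders, and the import path is assumed to be `google.golang.org/genproto/googleapis/monitoring/v3`.

package example

import (
	"time"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

// highCPUCondition wraps a MetricThreshold in the ConditionThreshold oneof
// wrapper and attaches a {count: 1} trigger, so a single failing series is
// enough to open an incident.
func highCPUCondition(threshold float64) *monitoring.AlertPolicy_Condition {
	return &monitoring.AlertPolicy_Condition{
		DisplayName: "CPU utilization too high",
		Condition: &monitoring.AlertPolicy_Condition_ConditionThreshold{
			ConditionThreshold: &monitoring.AlertPolicy_Condition_MetricThreshold{
				// Placeholder filter; must name a metric type.
				Filter:         `metric.type = "compute.googleapis.com/instance/cpu/utilization"`,
				Comparison:     monitoring.ComparisonType_COMPARISON_GT,
				ThresholdValue: threshold,
				Duration:       durationpb.New(5 * time.Minute), // must be a whole number of minutes
				Trigger: &monitoring.AlertPolicy_Condition_Trigger{
					Type: &monitoring.AlertPolicy_Condition_Trigger_Count{Count: 1},
				},
			},
		},
	}
}
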
-func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 2} -} - -func (x *AlertPolicy_Condition_MetricAbsence) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { - if x != nil { - return x.Aggregations - } - return nil -} - -func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration { - if x != nil { - return x.Duration - } - return nil -} - -func (x *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { - if x != nil { - return x.Trigger - } - return nil -} - -var File_google_monitoring_v3_alert_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf7, 0x13, 0x0a, 0x0b, - 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64, 0x6f, 0x63, 0x75, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4b, 0x0a, - 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x63, 0x6f, - 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x12, - 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, - 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x0e, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6d, 0x65, 0x5f, 0x74, 
0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x1a, 0xf4, 0x0a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x48, - 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x1a, 0x45, 0x0a, - 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x1a, 0xf2, 0x03, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, - 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x64, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, - 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x64, 0x65, 0x6e, - 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, - 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, - 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, - 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, - 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xf4, 0x01, 0x0a, 0x0d, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, - 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 
0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, - 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, - 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, - 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, - 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, - 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, - 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, 0x09, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, 0x10, - 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, 0x44, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x52, - 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, - 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x7d, 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 
0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, - 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc2, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x42, 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_alert_proto_rawDescOnce sync.Once - file_google_monitoring_v3_alert_proto_rawDescData = file_google_monitoring_v3_alert_proto_rawDesc -) - -func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_alert_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_alert_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_proto_rawDescData) - }) - return file_google_monitoring_v3_alert_proto_rawDescData -} - -var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_monitoring_v3_alert_proto_goTypes = []interface{}{ - (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType - (*AlertPolicy)(nil), // 1: google.monitoring.v3.AlertPolicy - (*AlertPolicy_Documentation)(nil), // 2: google.monitoring.v3.AlertPolicy.Documentation - (*AlertPolicy_Condition)(nil), // 3: google.monitoring.v3.AlertPolicy.Condition - nil, // 4: google.monitoring.v3.AlertPolicy.UserLabelsEntry - (*AlertPolicy_Condition_Trigger)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition.Trigger - (*AlertPolicy_Condition_MetricThreshold)(nil), // 6: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold - (*AlertPolicy_Condition_MetricAbsence)(nil), // 7: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue - (*status.Status)(nil), // 9: google.rpc.Status - (*MutationRecord)(nil), // 10: google.monitoring.v3.MutationRecord - (*Aggregation)(nil), // 11: google.monitoring.v3.Aggregation - (ComparisonType)(0), // 12: google.monitoring.v3.ComparisonType - (*durationpb.Duration)(nil), // 13: google.protobuf.Duration -} -var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ - 2, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation - 
4, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry - 3, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition - 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType - 8, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue - 9, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status - 10, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord - 10, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord - 6, // 8: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold - 7, // 9: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - 11, // 10: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation - 11, // 11: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation - 12, // 12: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType - 13, // 13: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration - 5, // 14: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 11, // 15: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation - 13, // 16: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration - 5, // 17: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 18, // [18:18] is the sub-list for method output_type - 18, // [18:18] is the sub-list for method input_type - 18, // [18:18] is the sub-list for extension type_name - 18, // [18:18] is the sub-list for extension extendee - 0, // [0:18] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_alert_proto_init() } -func file_google_monitoring_v3_alert_proto_init() { - if File_google_monitoring_v3_alert_proto != nil { - return - } - file_google_monitoring_v3_common_proto_init() - file_google_monitoring_v3_mutation_record_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlertPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlertPolicy_Documentation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlertPolicy_Condition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_monitoring_v3_alert_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlertPolicy_Condition_Trigger); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlertPolicy_Condition_MetricThreshold); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlertPolicy_Condition_MetricAbsence); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*AlertPolicy_Condition_ConditionThreshold)(nil), - (*AlertPolicy_Condition_ConditionAbsent)(nil), - } - file_google_monitoring_v3_alert_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*AlertPolicy_Condition_Trigger_Count)(nil), - (*AlertPolicy_Condition_Trigger_Percent)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc, - NumEnums: 1, - NumMessages: 7, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_alert_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_alert_proto_depIdxs, - EnumInfos: file_google_monitoring_v3_alert_proto_enumTypes, - MessageInfos: file_google_monitoring_v3_alert_proto_msgTypes, - }.Build() - File_google_monitoring_v3_alert_proto = out.File - file_google_monitoring_v3_alert_proto_rawDesc = nil - file_google_monitoring_v3_alert_proto_goTypes = nil - file_google_monitoring_v3_alert_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go deleted file mode 100644 index c1704c4b72..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go +++ /dev/null @@ -1,1009 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/alert_service.proto - -package monitoring - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The protocol for the `CreateAlertPolicy` request. -type CreateAlertPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project in which to create the alerting policy. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - // - // Note that this field names the parent container in which the alerting - // policy will be written, not the name of the created policy. The alerting - // policy that is returned will have a name that contains a normalized - // representation of this name as a prefix but adds a suffix of the form - // `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the - // container. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Required. The requested alerting policy. You should omit the `name` field in this - // policy. The name will be returned in the new policy, including - // a new `[ALERT_POLICY_ID]` value. - AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` -} - -func (x *CreateAlertPolicyRequest) Reset() { - *x = CreateAlertPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateAlertPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateAlertPolicyRequest) ProtoMessage() {} - -func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead. -func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0} -} - -func (x *CreateAlertPolicyRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { - if x != nil { - return x.AlertPolicy - } - return nil -} - -// The protocol for the `GetAlertPolicy` request. 
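
A minimal sketch of filling in this request, assuming the generated `AlertPolicyServiceClient` interface defined later in this same file and a placeholder project ID. Note that `Name` names the parent project, while the policy's own `name` field is left unset so the service can assign `[ALERT_POLICY_ID]`.

package example

import (
	"context"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
)

// createPolicy is a hypothetical helper around the generated gRPC client.
func createPolicy(ctx context.Context, client monitoring.AlertPolicyServiceClient, policy *monitoring.AlertPolicy) (*monitoring.AlertPolicy, error) {
	req := &monitoring.CreateAlertPolicyRequest{
		Name:        "projects/my-project", // parent container, not the policy name
		AlertPolicy: policy,                // policy.Name intentionally left empty
	}
	return client.CreateAlertPolicy(ctx, req)
}
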
-type GetAlertPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The alerting policy to retrieve. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetAlertPolicyRequest) Reset() { - *x = GetAlertPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAlertPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAlertPolicyRequest) ProtoMessage() {} - -func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead. -func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1} -} - -func (x *GetAlertPolicyRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The protocol for the `ListAlertPolicies` request. -type ListAlertPoliciesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project whose alert policies are to be listed. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - // - // Note that this field names the parent container in which the alerting - // policies to be listed are stored. To retrieve a single alerting policy - // by name, use the - // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] - // operation, instead. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // If provided, this field specifies the criteria that must be met by - // alert policies to be included in the response. - // - // For more details, see [sorting and - // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). - Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` - // A comma-separated list of fields by which to sort the result. Supports - // the same set of field references as the `filter` field. Entries can be - // prefixed with a minus sign to sort by the field in descending order. - // - // For more details, see [sorting and - // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). - OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` - // The maximum number of results to return in a single response. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return more results from the previous method call. 
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListAlertPoliciesRequest) Reset() { - *x = ListAlertPoliciesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListAlertPoliciesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListAlertPoliciesRequest) ProtoMessage() {} - -func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead. -func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2} -} - -func (x *ListAlertPoliciesRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListAlertPoliciesRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListAlertPoliciesRequest) GetOrderBy() string { - if x != nil { - return x.OrderBy - } - return "" -} - -func (x *ListAlertPoliciesRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListAlertPoliciesRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The protocol for the `ListAlertPolicies` response. -type ListAlertPoliciesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The returned alert policies. - AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` - // If there might be more results than were returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListAlertPoliciesResponse) Reset() { - *x = ListAlertPoliciesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListAlertPoliciesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListAlertPoliciesResponse) ProtoMessage() {} - -func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead. 
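
A sketch of the usual pagination loop over this request/response pair, again assuming the generated `AlertPolicyServiceClient` interface from this file and a placeholder parent; `listAllPolicies` is a hypothetical helper.

package example

import (
	"context"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
)

// listAllPolicies keeps feeding next_page_token back in as page_token until
// the service returns an empty token, accumulating every policy in the parent.
func listAllPolicies(ctx context.Context, client monitoring.AlertPolicyServiceClient) ([]*monitoring.AlertPolicy, error) {
	var all []*monitoring.AlertPolicy
	req := &monitoring.ListAlertPoliciesRequest{
		Name:     "projects/my-project",
		PageSize: 100,
	}
	for {
		resp, err := client.ListAlertPolicies(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetAlertPolicies()...)
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}
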
-func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3} -} - -func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { - if x != nil { - return x.AlertPolicies - } - return nil -} - -func (x *ListAlertPoliciesResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The protocol for the `UpdateAlertPolicy` request. -type UpdateAlertPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. A list of alerting policy field names. If this field is not - // empty, each listed field in the existing alerting policy is set to the - // value of the corresponding field in the supplied policy (`alert_policy`), - // or to the field's default value if the field is not in the supplied - // alerting policy. Fields not listed retain their previous value. - // - // Examples of valid field masks include `display_name`, `documentation`, - // `documentation.content`, `documentation.mime_type`, `user_labels`, - // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. - // - // If this field is empty, then the supplied alerting policy replaces the - // existing policy. It is the same as deleting the existing policy and - // adding the supplied policy, except for the following: - // - // + The new policy will have the same `[ALERT_POLICY_ID]` as the former - // policy. This gives you continuity with the former policy in your - // notifications and incidents. - // + Conditions in the new policy will keep their former `[CONDITION_ID]` if - // the supplied condition includes the `name` field with that - // `[CONDITION_ID]`. If the supplied condition omits the `name` field, - // then a new `[CONDITION_ID]` is created. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - // Required. The updated alerting policy or the updated values for the - // fields listed in `update_mask`. - // If `update_mask` is not empty, any fields in this policy that are - // not in `update_mask` are ignored. - AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` -} - -func (x *UpdateAlertPolicyRequest) Reset() { - *x = UpdateAlertPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateAlertPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateAlertPolicyRequest) ProtoMessage() {} - -func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead. 
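
A sketch of a partial update driven by the `update_mask` semantics described above: only `display_name` and `enabled` are listed in the mask, so every other field of the existing policy is preserved. The policy name and helper are placeholders; `fieldmaskpb` and `wrapperspb` are the well-known-type helper packages from `google.golang.org/protobuf`.

package example

import (
	"context"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// renameAndDisable is a hypothetical helper around the generated gRPC client.
func renameAndDisable(ctx context.Context, client monitoring.AlertPolicyServiceClient, newName string) (*monitoring.AlertPolicy, error) {
	req := &monitoring.UpdateAlertPolicyRequest{
		// Only these paths are touched; unlisted fields keep their values.
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"display_name", "enabled"}},
		AlertPolicy: &monitoring.AlertPolicy{
			// Keeping the existing [ALERT_POLICY_ID] in `name` updates the same
			// policy rather than replacing it.
			Name:        "projects/my-project/alertPolicies/1234567890",
			DisplayName: newName,
			Enabled:     wrapperspb.Bool(false),
		},
	}
	return client.UpdateAlertPolicy(ctx, req)
}
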
-func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4} -} - -func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { - if x != nil { - return x.AlertPolicy - } - return nil -} - -// The protocol for the `DeleteAlertPolicy` request. -type DeleteAlertPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The alerting policy to delete. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - // - // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteAlertPolicyRequest) Reset() { - *x = DeleteAlertPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteAlertPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteAlertPolicyRequest) ProtoMessage() {} - -func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead. -func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5} -} - -func (x *DeleteAlertPolicyRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, - 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, - 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x18, - 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x4c, - 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x61, 0x6c, 0x65, 0x72, - 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x18, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, - 0x5d, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, - 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, - 0x08, 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, - 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 
0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, - 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x34, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, - 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x22, 0x23, 0x2f, 0x76, 0x33, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, - 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0xda, 0x41, 0x11, - 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x34, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, - 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xcb, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x63, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x32, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c, 0x65, - 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x6e, 0x61, 
0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, - 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0xda, 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, - 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, - 0xc9, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, - 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once - file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc -) - -func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData) - }) - return file_google_monitoring_v3_alert_service_proto_rawDescData -} - -var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_google_monitoring_v3_alert_service_proto_goTypes = []interface{}{ - 
(*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest - (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest - (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest - (*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse - (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest - (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest - (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy - (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask - (*emptypb.Empty)(nil), // 8: google.protobuf.Empty -} -var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{ - 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy - 6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy - 7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask - 6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy - 2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest - 1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest - 0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest - 5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest - 4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest - 3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse - 6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy - 6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy - 8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty - 6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy - 9, // [9:14] is the sub-list for method output_type - 4, // [4:9] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_alert_service_proto_init() } -func file_google_monitoring_v3_alert_service_proto_init() { - if File_google_monitoring_v3_alert_service_proto != nil { - return - } - file_google_monitoring_v3_alert_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateAlertPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAlertPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListAlertPoliciesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListAlertPoliciesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateAlertPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteAlertPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_alert_service_proto = out.File - file_google_monitoring_v3_alert_service_proto_rawDesc = nil - file_google_monitoring_v3_alert_service_proto_goTypes = nil - file_google_monitoring_v3_alert_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// AlertPolicyServiceClient is the client API for AlertPolicyService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type AlertPolicyServiceClient interface { - // Lists the existing alerting policies for the project. - ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) - // Gets a single alerting policy. - GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) - // Creates a new alerting policy. - CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) - // Deletes an alerting policy. - DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Updates an alerting policy. You can either replace the entire policy with - // a new one or replace only certain fields in the current alerting policy by - // specifying the fields to be updated via `updateMask`. Returns the - // updated alerting policy. 
- UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) -} - -type alertPolicyServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient { - return &alertPolicyServiceClient{cc} -} - -func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { - out := new(ListAlertPoliciesResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { - out := new(AlertPolicy) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { - out := new(AlertPolicy) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { - out := new(AlertPolicy) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AlertPolicyServiceServer is the server API for AlertPolicyService service. -type AlertPolicyServiceServer interface { - // Lists the existing alerting policies for the project. - ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) - // Gets a single alerting policy. - GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) - // Creates a new alerting policy. - CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) - // Deletes an alerting policy. - DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) - // Updates an alerting policy. You can either replace the entire policy with - // a new one or replace only certain fields in the current alerting policy by - // specifying the fields to be updated via `updateMask`. Returns the - // updated alerting policy. - UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) -} - -// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations. 
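The AlertPolicyServiceClient surface above pairs with the list request and response messages earlier in this file. Below is a minimal sketch of paging through every policy; it assumes a pre-dialed *grpc.ClientConn, and the project name and helper name are placeholders.

package example

import (
	"context"
	"fmt"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/grpc"
)

// listAllPolicies (hypothetical helper) walks every page of results,
// following NextPageToken until the server returns an empty token.
func listAllPolicies(ctx context.Context, conn *grpc.ClientConn) error {
	client := monitoringpb.NewAlertPolicyServiceClient(conn)
	req := &monitoringpb.ListAlertPoliciesRequest{
		Name:     "projects/my-project", // placeholder parent
		PageSize: 100,
	}
	for {
		resp, err := client.ListAlertPolicies(ctx, req)
		if err != nil {
			return err
		}
		for _, p := range resp.GetAlertPolicies() {
			fmt.Println(p.GetName())
		}
		if resp.GetNextPageToken() == "" {
			return nil
		}
		// Ask for the next page on the following iteration.
		req.PageToken = resp.GetNextPageToken()
	}
}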
-type UnimplementedAlertPolicyServiceServer struct { -} - -func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented") -} -func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented") -} -func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented") -} -func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented") -} -func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented") -} - -func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { - s.RegisterService(&_AlertPolicyService_serviceDesc, srv) -} - -func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListAlertPoliciesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAlertPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateAlertPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteAlertPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateAlertPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.monitoring.v3.AlertPolicyService", - HandlerType: (*AlertPolicyServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListAlertPolicies", - Handler: _AlertPolicyService_ListAlertPolicies_Handler, - }, - { - MethodName: "GetAlertPolicy", - Handler: _AlertPolicyService_GetAlertPolicy_Handler, - }, - { - MethodName: "CreateAlertPolicy", - Handler: _AlertPolicyService_CreateAlertPolicy_Handler, - }, - { - MethodName: "DeleteAlertPolicy", - Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, - }, - { - MethodName: "UpdateAlertPolicy", - Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/monitoring/v3/alert_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go deleted file mode 100644 index 87b735ae38..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go +++ /dev/null @@ -1,1150 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/common.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - distribution "google.golang.org/genproto/googleapis/api/distribution" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Specifies an ordering relationship on two arguments, called `left` and -// `right`. -type ComparisonType int32 - -const ( - // No ordering relationship is specified. - ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 - // True if the left argument is greater than the right argument. - ComparisonType_COMPARISON_GT ComparisonType = 1 - // True if the left argument is greater than or equal to the right argument. - ComparisonType_COMPARISON_GE ComparisonType = 2 - // True if the left argument is less than the right argument. - ComparisonType_COMPARISON_LT ComparisonType = 3 - // True if the left argument is less than or equal to the right argument. - ComparisonType_COMPARISON_LE ComparisonType = 4 - // True if the left argument is equal to the right argument. - ComparisonType_COMPARISON_EQ ComparisonType = 5 - // True if the left argument is not equal to the right argument. - ComparisonType_COMPARISON_NE ComparisonType = 6 -) - -// Enum value maps for ComparisonType. -var ( - ComparisonType_name = map[int32]string{ - 0: "COMPARISON_UNSPECIFIED", - 1: "COMPARISON_GT", - 2: "COMPARISON_GE", - 3: "COMPARISON_LT", - 4: "COMPARISON_LE", - 5: "COMPARISON_EQ", - 6: "COMPARISON_NE", - } - ComparisonType_value = map[string]int32{ - "COMPARISON_UNSPECIFIED": 0, - "COMPARISON_GT": 1, - "COMPARISON_GE": 2, - "COMPARISON_LT": 3, - "COMPARISON_LE": 4, - "COMPARISON_EQ": 5, - "COMPARISON_NE": 6, - } -) - -func (x ComparisonType) Enum() *ComparisonType { - p := new(ComparisonType) - *p = x - return p -} - -func (x ComparisonType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ComparisonType) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_common_proto_enumTypes[0].Descriptor() -} - -func (ComparisonType) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_common_proto_enumTypes[0] -} - -func (x ComparisonType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ComparisonType.Descriptor instead. -func (ComparisonType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} -} - -// The tier of service for a Workspace. Please see the -// [service tiers -// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more -// details. -// -// Deprecated: Do not use. -type ServiceTier int32 - -const ( - // An invalid sentinel value, used to indicate that a tier has not - // been provided explicitly. 
- ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 - // The Stackdriver Basic tier, a free tier of service that provides basic - // features, a moderate allotment of logs, and access to built-in metrics. - // A number of features are not available in this tier. For more details, - // see [the service tiers - // documentation](https://cloud.google.com/monitoring/workspaces/tiers). - ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 - // The Stackdriver Premium tier, a higher, more expensive tier of service - // that provides access to all Stackdriver features, lets you use Stackdriver - // with AWS accounts, and has a larger allotments for logs and metrics. For - // more details, see [the service tiers - // documentation](https://cloud.google.com/monitoring/workspaces/tiers). - ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 -) - -// Enum value maps for ServiceTier. -var ( - ServiceTier_name = map[int32]string{ - 0: "SERVICE_TIER_UNSPECIFIED", - 1: "SERVICE_TIER_BASIC", - 2: "SERVICE_TIER_PREMIUM", - } - ServiceTier_value = map[string]int32{ - "SERVICE_TIER_UNSPECIFIED": 0, - "SERVICE_TIER_BASIC": 1, - "SERVICE_TIER_PREMIUM": 2, - } -) - -func (x ServiceTier) Enum() *ServiceTier { - p := new(ServiceTier) - *p = x - return p -} - -func (x ServiceTier) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ServiceTier) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_common_proto_enumTypes[1].Descriptor() -} - -func (ServiceTier) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_common_proto_enumTypes[1] -} - -func (x ServiceTier) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ServiceTier.Descriptor instead. -func (ServiceTier) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} -} - -// The `Aligner` specifies the operation that will be applied to the data -// points in each alignment period in a time series. Except for -// `ALIGN_NONE`, which specifies that no operation be applied, each alignment -// operation replaces the set of data values in each alignment period with -// a single value: the result of applying the operation to the data values. -// An aligned time series has a single data value at the end of each -// `alignment_period`. -// -// An alignment operation can change the data type of the values, too. For -// example, if you apply a counting operation to boolean values, the data -// `value_type` in the original time series is `BOOLEAN`, but the `value_type` -// in the aligned result is `INT64`. -type Aggregation_Aligner int32 - -const ( - // No alignment. Raw data is returned. Not valid if cross-series reduction - // is requested. The `value_type` of the result is the same as the - // `value_type` of the input. - Aggregation_ALIGN_NONE Aggregation_Aligner = 0 - // Align and convert to - // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA]. - // The output is `delta = y1 - y0`. - // - // This alignment is valid for - // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and - // `DELTA` metrics. If the selected alignment period results in periods - // with no data, then the aligned value for such a period is created by - // interpolation. The `value_type` of the aligned result is the same as - // the `value_type` of the input. - Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 - // Align and convert to a rate. 
The result is computed as - // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time". - // Think of this aligner as providing the slope of the line that passes - // through the value at the start and at the end of the `alignment_period`. - // - // This aligner is valid for `CUMULATIVE` - // and `DELTA` metrics with numeric values. If the selected alignment - // period results in periods with no data, then the aligned value for - // such a period is created by interpolation. The output is a `GAUGE` - // metric with `value_type` `DOUBLE`. - // - // If, by "rate", you mean "percentage change", see the - // `ALIGN_PERCENT_CHANGE` aligner instead. - Aggregation_ALIGN_RATE Aggregation_Aligner = 2 - // Align by interpolating between adjacent points around the alignment - // period boundary. This aligner is valid for `GAUGE` metrics with - // numeric values. The `value_type` of the aligned result is the same as the - // `value_type` of the input. - Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 - // Align by moving the most recent data point before the end of the - // alignment period to the boundary at the end of the alignment - // period. This aligner is valid for `GAUGE` metrics. The `value_type` of - // the aligned result is the same as the `value_type` of the input. - Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 - // Align the time series by returning the minimum value in each alignment - // period. This aligner is valid for `GAUGE` and `DELTA` metrics with - // numeric values. The `value_type` of the aligned result is the same as - // the `value_type` of the input. - Aggregation_ALIGN_MIN Aggregation_Aligner = 10 - // Align the time series by returning the maximum value in each alignment - // period. This aligner is valid for `GAUGE` and `DELTA` metrics with - // numeric values. The `value_type` of the aligned result is the same as - // the `value_type` of the input. - Aggregation_ALIGN_MAX Aggregation_Aligner = 11 - // Align the time series by returning the mean value in each alignment - // period. This aligner is valid for `GAUGE` and `DELTA` metrics with - // numeric values. The `value_type` of the aligned result is `DOUBLE`. - Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 - // Align the time series by returning the number of values in each alignment - // period. This aligner is valid for `GAUGE` and `DELTA` metrics with - // numeric or Boolean values. The `value_type` of the aligned result is - // `INT64`. - Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 - // Align the time series by returning the sum of the values in each - // alignment period. This aligner is valid for `GAUGE` and `DELTA` - // metrics with numeric and distribution values. The `value_type` of the - // aligned result is the same as the `value_type` of the input. - Aggregation_ALIGN_SUM Aggregation_Aligner = 14 - // Align the time series by returning the standard deviation of the values - // in each alignment period. This aligner is valid for `GAUGE` and - // `DELTA` metrics with numeric values. The `value_type` of the output is - // `DOUBLE`. - Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 - // Align the time series by returning the number of `True` values in - // each alignment period. This aligner is valid for `GAUGE` metrics with - // Boolean values. The `value_type` of the output is `INT64`. - Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 - // Align the time series by returning the number of `False` values in - // each alignment period. 
This aligner is valid for `GAUGE` metrics with - // Boolean values. The `value_type` of the output is `INT64`. - Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 - // Align the time series by returning the ratio of the number of `True` - // values to the total number of values in each alignment period. This - // aligner is valid for `GAUGE` metrics with Boolean values. The output - // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`. - Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 - // Align the time series by using [percentile - // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting - // data point in each alignment period is the 99th percentile of all data - // points in the period. This aligner is valid for `GAUGE` and `DELTA` - // metrics with distribution values. The output is a `GAUGE` metric with - // `value_type` `DOUBLE`. - Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 - // Align the time series by using [percentile - // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting - // data point in each alignment period is the 95th percentile of all data - // points in the period. This aligner is valid for `GAUGE` and `DELTA` - // metrics with distribution values. The output is a `GAUGE` metric with - // `value_type` `DOUBLE`. - Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 - // Align the time series by using [percentile - // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting - // data point in each alignment period is the 50th percentile of all data - // points in the period. This aligner is valid for `GAUGE` and `DELTA` - // metrics with distribution values. The output is a `GAUGE` metric with - // `value_type` `DOUBLE`. - Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 - // Align the time series by using [percentile - // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting - // data point in each alignment period is the 5th percentile of all data - // points in the period. This aligner is valid for `GAUGE` and `DELTA` - // metrics with distribution values. The output is a `GAUGE` metric with - // `value_type` `DOUBLE`. - Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 - // Align and convert to a percentage change. This aligner is valid for - // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns - // `((current - previous)/previous) * 100`, where the value of `previous` is - // determined based on the `alignment_period`. - // - // If the values of `current` and `previous` are both 0, then the returned - // value is 0. If only `previous` is 0, the returned value is infinity. - // - // A 10-minute moving mean is computed at each point of the alignment period - // prior to the above calculation to smooth the metric and prevent false - // positives from very short-lived spikes. The moving mean is only - // applicable for data whose values are `>= 0`. Any values `< 0` are - // treated as a missing datapoint, and are ignored. While `DELTA` - // metrics are accepted by this alignment, special care should be taken that - // the values for the metric will always be positive. The output is a - // `GAUGE` metric with `value_type` `DOUBLE`. - Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 -) - -// Enum value maps for Aggregation_Aligner. 
-var ( - Aggregation_Aligner_name = map[int32]string{ - 0: "ALIGN_NONE", - 1: "ALIGN_DELTA", - 2: "ALIGN_RATE", - 3: "ALIGN_INTERPOLATE", - 4: "ALIGN_NEXT_OLDER", - 10: "ALIGN_MIN", - 11: "ALIGN_MAX", - 12: "ALIGN_MEAN", - 13: "ALIGN_COUNT", - 14: "ALIGN_SUM", - 15: "ALIGN_STDDEV", - 16: "ALIGN_COUNT_TRUE", - 24: "ALIGN_COUNT_FALSE", - 17: "ALIGN_FRACTION_TRUE", - 18: "ALIGN_PERCENTILE_99", - 19: "ALIGN_PERCENTILE_95", - 20: "ALIGN_PERCENTILE_50", - 21: "ALIGN_PERCENTILE_05", - 23: "ALIGN_PERCENT_CHANGE", - } - Aggregation_Aligner_value = map[string]int32{ - "ALIGN_NONE": 0, - "ALIGN_DELTA": 1, - "ALIGN_RATE": 2, - "ALIGN_INTERPOLATE": 3, - "ALIGN_NEXT_OLDER": 4, - "ALIGN_MIN": 10, - "ALIGN_MAX": 11, - "ALIGN_MEAN": 12, - "ALIGN_COUNT": 13, - "ALIGN_SUM": 14, - "ALIGN_STDDEV": 15, - "ALIGN_COUNT_TRUE": 16, - "ALIGN_COUNT_FALSE": 24, - "ALIGN_FRACTION_TRUE": 17, - "ALIGN_PERCENTILE_99": 18, - "ALIGN_PERCENTILE_95": 19, - "ALIGN_PERCENTILE_50": 20, - "ALIGN_PERCENTILE_05": 21, - "ALIGN_PERCENT_CHANGE": 23, - } -) - -func (x Aggregation_Aligner) Enum() *Aggregation_Aligner { - p := new(Aggregation_Aligner) - *p = x - return p -} - -func (x Aggregation_Aligner) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Aggregation_Aligner) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_common_proto_enumTypes[2].Descriptor() -} - -func (Aggregation_Aligner) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_common_proto_enumTypes[2] -} - -func (x Aggregation_Aligner) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Aggregation_Aligner.Descriptor instead. -func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 0} -} - -// A Reducer operation describes how to aggregate data points from multiple -// time series into a single time series, where the value of each data point -// in the resulting series is a function of all the already aligned values in -// the input time series. -type Aggregation_Reducer int32 - -const ( - // No cross-time series reduction. The output of the `Aligner` is - // returned. - Aggregation_REDUCE_NONE Aggregation_Reducer = 0 - // Reduce by computing the mean value across time series for each - // alignment period. This reducer is valid for - // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and - // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with - // numeric or distribution values. The `value_type` of the output is - // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. - Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 - // Reduce by computing the minimum value across time series for each - // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics - // with numeric values. The `value_type` of the output is the same as the - // `value_type` of the input. - Aggregation_REDUCE_MIN Aggregation_Reducer = 2 - // Reduce by computing the maximum value across time series for each - // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics - // with numeric values. The `value_type` of the output is the same as the - // `value_type` of the input. - Aggregation_REDUCE_MAX Aggregation_Reducer = 3 - // Reduce by computing the sum across time series for each - // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics - // with numeric and distribution values. 
The `value_type` of the output is - // the same as the `value_type` of the input. - Aggregation_REDUCE_SUM Aggregation_Reducer = 4 - // Reduce by computing the standard deviation across time series - // for each alignment period. This reducer is valid for `DELTA` and - // `GAUGE` metrics with numeric or distribution values. The `value_type` - // of the output is `DOUBLE`. - Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 - // Reduce by computing the number of data points across time series - // for each alignment period. This reducer is valid for `DELTA` and - // `GAUGE` metrics of numeric, Boolean, distribution, and string - // `value_type`. The `value_type` of the output is `INT64`. - Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 - // Reduce by computing the number of `True`-valued data points across time - // series for each alignment period. This reducer is valid for `DELTA` and - // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output - // is `INT64`. - Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 - // Reduce by computing the number of `False`-valued data points across time - // series for each alignment period. This reducer is valid for `DELTA` and - // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output - // is `INT64`. - Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 - // Reduce by computing the ratio of the number of `True`-valued data points - // to the total number of data points for each alignment period. This - // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`. - // The output value is in the range [0.0, 1.0] and has `value_type` - // `DOUBLE`. - Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 - // Reduce by computing the [99th - // percentile](https://en.wikipedia.org/wiki/Percentile) of data points - // across time series for each alignment period. This reducer is valid for - // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value - // of the output is `DOUBLE`. - Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 - // Reduce by computing the [95th - // percentile](https://en.wikipedia.org/wiki/Percentile) of data points - // across time series for each alignment period. This reducer is valid for - // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value - // of the output is `DOUBLE`. - Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 - // Reduce by computing the [50th - // percentile](https://en.wikipedia.org/wiki/Percentile) of data points - // across time series for each alignment period. This reducer is valid for - // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value - // of the output is `DOUBLE`. - Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 - // Reduce by computing the [5th - // percentile](https://en.wikipedia.org/wiki/Percentile) of data points - // across time series for each alignment period. This reducer is valid for - // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value - // of the output is `DOUBLE`. - Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 -) - -// Enum value maps for Aggregation_Reducer. 
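The Aligner and Reducer values above are normally combined in the Aggregation message defined further down in this file. A hedged sketch of one common combination follows (a 60-second ALIGN_RATE per series, then REDUCE_MEAN across series per zone); the helper name and the group-by label string are illustrative assumptions, not taken from this file.

package example

import (
	"time"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

// exampleAggregation (hypothetical helper) aligns each time series into
// 60-second windows with ALIGN_RATE, then averages the aligned series
// across each zone with REDUCE_MEAN.
func exampleAggregation() *monitoringpb.Aggregation {
	return &monitoringpb.Aggregation{
		AlignmentPeriod:    durationpb.New(60 * time.Second),
		PerSeriesAligner:   monitoringpb.Aggregation_ALIGN_RATE,
		CrossSeriesReducer: monitoringpb.Aggregation_REDUCE_MEAN,
		GroupByFields:      []string{"resource.label.zone"}, // illustrative group-by field
	}
}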
-var ( - Aggregation_Reducer_name = map[int32]string{ - 0: "REDUCE_NONE", - 1: "REDUCE_MEAN", - 2: "REDUCE_MIN", - 3: "REDUCE_MAX", - 4: "REDUCE_SUM", - 5: "REDUCE_STDDEV", - 6: "REDUCE_COUNT", - 7: "REDUCE_COUNT_TRUE", - 15: "REDUCE_COUNT_FALSE", - 8: "REDUCE_FRACTION_TRUE", - 9: "REDUCE_PERCENTILE_99", - 10: "REDUCE_PERCENTILE_95", - 11: "REDUCE_PERCENTILE_50", - 12: "REDUCE_PERCENTILE_05", - } - Aggregation_Reducer_value = map[string]int32{ - "REDUCE_NONE": 0, - "REDUCE_MEAN": 1, - "REDUCE_MIN": 2, - "REDUCE_MAX": 3, - "REDUCE_SUM": 4, - "REDUCE_STDDEV": 5, - "REDUCE_COUNT": 6, - "REDUCE_COUNT_TRUE": 7, - "REDUCE_COUNT_FALSE": 15, - "REDUCE_FRACTION_TRUE": 8, - "REDUCE_PERCENTILE_99": 9, - "REDUCE_PERCENTILE_95": 10, - "REDUCE_PERCENTILE_50": 11, - "REDUCE_PERCENTILE_05": 12, - } -) - -func (x Aggregation_Reducer) Enum() *Aggregation_Reducer { - p := new(Aggregation_Reducer) - *p = x - return p -} - -func (x Aggregation_Reducer) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Aggregation_Reducer) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_common_proto_enumTypes[3].Descriptor() -} - -func (Aggregation_Reducer) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_common_proto_enumTypes[3] -} - -func (x Aggregation_Reducer) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Aggregation_Reducer.Descriptor instead. -func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 1} -} - -// A single strongly-typed value. -type TypedValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The typed value field. - // - // Types that are assignable to Value: - // *TypedValue_BoolValue - // *TypedValue_Int64Value - // *TypedValue_DoubleValue - // *TypedValue_StringValue - // *TypedValue_DistributionValue - Value isTypedValue_Value `protobuf_oneof:"value"` -} - -func (x *TypedValue) Reset() { - *x = TypedValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TypedValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TypedValue) ProtoMessage() {} - -func (x *TypedValue) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TypedValue.ProtoReflect.Descriptor instead. 
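TypedValue above is a oneof wrapper: exactly one variant struct (defined just below) is stored in Value, and the Get accessors for every other variant return their zero value. A small hypothetical sketch:

package main

import (
	"fmt"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

func main() {
	// Select the double variant; the wrapper type picks which oneof field is set.
	v := &monitoringpb.TypedValue{
		Value: &monitoringpb.TypedValue_DoubleValue{DoubleValue: 0.95},
	}
	fmt.Println(v.GetDoubleValue()) // 0.95
	fmt.Println(v.GetBoolValue())   // false: a different variant is set
}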
-func (*TypedValue) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} -} - -func (m *TypedValue) GetValue() isTypedValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *TypedValue) GetBoolValue() bool { - if x, ok := x.GetValue().(*TypedValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (x *TypedValue) GetInt64Value() int64 { - if x, ok := x.GetValue().(*TypedValue_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (x *TypedValue) GetDoubleValue() float64 { - if x, ok := x.GetValue().(*TypedValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (x *TypedValue) GetStringValue() string { - if x, ok := x.GetValue().(*TypedValue_StringValue); ok { - return x.StringValue - } - return "" -} - -func (x *TypedValue) GetDistributionValue() *distribution.Distribution { - if x, ok := x.GetValue().(*TypedValue_DistributionValue); ok { - return x.DistributionValue - } - return nil -} - -type isTypedValue_Value interface { - isTypedValue_Value() -} - -type TypedValue_BoolValue struct { - // A Boolean value: `true` or `false`. - BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type TypedValue_Int64Value struct { - // A 64-bit integer. Its range is approximately ±9.2x1018. - Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type TypedValue_DoubleValue struct { - // A 64-bit double-precision floating-point number. Its magnitude - // is approximately ±10±300 and it has 16 - // significant digits of precision. - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type TypedValue_StringValue struct { - // A variable-length string value. - StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type TypedValue_DistributionValue struct { - // A distribution value. - DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"` -} - -func (*TypedValue_BoolValue) isTypedValue_Value() {} - -func (*TypedValue_Int64Value) isTypedValue_Value() {} - -func (*TypedValue_DoubleValue) isTypedValue_Value() {} - -func (*TypedValue_StringValue) isTypedValue_Value() {} - -func (*TypedValue_DistributionValue) isTypedValue_Value() {} - -// A closed time interval. It extends from the start time to the end time, and -// includes both: `[startTime, endTime]`. Valid time intervals depend on the -// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) -// of the metric value. In no case can the end time be earlier than the start -// time. -// -// * For a `GAUGE` metric, the `startTime` value is technically optional; if -// no value is specified, the start time defaults to the value of the -// end time, and the interval represents a single point in time. If both -// start and end times are specified, they must be identical. Such an -// interval is valid only for `GAUGE` metrics, which are point-in-time -// measurements. -// -// * For `DELTA` and `CUMULATIVE` metrics, the start time must be earlier -// than the end time. -// -// * In all cases, the start time of the next interval must be -// at least a millisecond after the end time of the previous interval. 
-// Because the interval is closed, if the start time of a new interval -// is the same as the end time of the previous interval, data written -// at the new start time could overwrite data written at the previous -// end time. -type TimeInterval struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The end of the time interval. - EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // Optional. The beginning of the time interval. The default value - // for the start time is the end time. The start time must not be - // later than the end time. - StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` -} - -func (x *TimeInterval) Reset() { - *x = TimeInterval{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeInterval) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeInterval) ProtoMessage() {} - -func (x *TimeInterval) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeInterval.ProtoReflect.Descriptor instead. -func (*TimeInterval) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} -} - -func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp { - if x != nil { - return x.EndTime - } - return nil -} - -func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp { - if x != nil { - return x.StartTime - } - return nil -} - -// Describes how to combine multiple time series to provide a different view of -// the data. Aggregation of time series is done in two steps. First, each time -// series in the set is _aligned_ to the same time interval boundaries, then the -// set of time series is optionally _reduced_ in number. -// -// Alignment consists of applying the `per_series_aligner` operation -// to each time series after its data has been divided into regular -// `alignment_period` time intervals. This process takes _all_ of the data -// points in an alignment period, applies a mathematical transformation such as -// averaging, minimum, maximum, delta, etc., and converts them into a single -// data point per period. -// -// Reduction is when the aligned and transformed time series can optionally be -// combined, reducing the number of time series through similar mathematical -// transformations. Reduction involves applying a `cross_series_reducer` to -// all the time series, optionally sorting the time series into subsets with -// `group_by_fields`, and applying the reducer to each subset. -// -// The raw time series data can contain a huge amount of information from -// multiple sources. Alignment and reduction transforms this mass of data into -// a more manageable and representative collection of data, for example "the -// 95% latency across the average of all tasks in a cluster". This -// representative data can be more easily graphed and comprehended, and the -// individual time series data is still available for later drilldown. 
For more -// details, see [Filtering and -// aggregation](https://cloud.google.com/monitoring/api/v3/aggregation). -type Aggregation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The `alignment_period` specifies a time interval, in seconds, that is used - // to divide the data in all the - // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of - // time. This will be done before the per-series aligner can be applied to - // the data. - // - // The value must be at least 60 seconds. If a per-series aligner other than - // `ALIGN_NONE` is specified, this field is required or an error is returned. - // If no per-series aligner is specified, or the aligner `ALIGN_NONE` is - // specified, then this field is ignored. - AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` - // An `Aligner` describes how to bring the data points in a single - // time series into temporal alignment. Except for `ALIGN_NONE`, all - // alignments cause all the data points in an `alignment_period` to be - // mathematically grouped together, resulting in a single data point for - // each `alignment_period` with end timestamp at the end of the period. - // - // Not all alignment operations may be applied to all time series. The valid - // choices depend on the `metric_kind` and `value_type` of the original time - // series. Alignment can change the `metric_kind` or the `value_type` of - // the time series. - // - // Time series data must be aligned in order to perform cross-time - // series reduction. If `cross_series_reducer` is specified, then - // `per_series_aligner` must be specified and not equal to `ALIGN_NONE` - // and `alignment_period` must be specified; otherwise, an error is - // returned. - PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` - // The reduction operation to be used to combine time series into a single - // time series, where the value of each data point in the resulting series is - // a function of all the already aligned values in the input time series. - // - // Not all reducer operations can be applied to all time series. The valid - // choices depend on the `metric_kind` and the `value_type` of the original - // time series. Reduction can yield a time series with a different - // `metric_kind` or `value_type` than the input time series. - // - // Time series data must first be aligned (see `per_series_aligner`) in order - // to perform cross-time series reduction. If `cross_series_reducer` is - // specified, then `per_series_aligner` must be specified, and must not be - // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an - // error is returned. - CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` - // The set of fields to preserve when `cross_series_reducer` is - // specified. The `group_by_fields` determine how the time series are - // partitioned into subsets prior to applying the aggregation - // operation. Each subset contains time series that have the same - // value for each of the grouping fields. Each individual time - // series is a member of exactly one subset. 
The - // `cross_series_reducer` is applied to each subset of time series. - // It is not possible to reduce across different resource types, so - // this field implicitly contains `resource.type`. Fields not - // specified in `group_by_fields` are aggregated away. If - // `group_by_fields` is not specified and all the time series have - // the same resource type, then the time series are aggregated into - // a single output time series. If `cross_series_reducer` is not - // defined, this field is ignored. - GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` -} - -func (x *Aggregation) Reset() { - *x = Aggregation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_common_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Aggregation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Aggregation) ProtoMessage() {} - -func (x *Aggregation) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_common_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Aggregation.ProtoReflect.Descriptor instead. -func (*Aggregation) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2} -} - -func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration { - if x != nil { - return x.AlignmentPeriod - } - return nil -} - -func (x *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { - if x != nil { - return x.PerSeriesAligner - } - return Aggregation_ALIGN_NONE -} - -func (x *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { - if x != nil { - return x.CrossSeriesReducer - } - return Aggregation_REDUCE_NONE -} - -func (x *Aggregation) GetGroupByFields() []string { - if x != nil { - return x.GroupByFields - } - return nil -} - -var File_google_monitoring_v3_common_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_common_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x0a, 0x54, 0x79, - 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, - 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, - 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, - 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x54, - 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x35, 0x0a, 0x08, 0x65, - 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf3, 0x07, - 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, - 0x10, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x70, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14, - 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x64, - 0x75, 0x63, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x64, 0x75, 0x63, 0x65, 0x72, 0x52, 0x12, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x26, 0x0a, 
0x0f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, - 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, - 0x0b, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 0x0e, - 0x0a, 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15, - 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x4f, 0x4c, - 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, - 0x45, 0x58, 0x54, 0x5f, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x41, - 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x0a, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, - 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x49, - 0x47, 0x4e, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x4c, 0x49, - 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, - 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4c, 0x49, - 0x47, 0x4e, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x41, - 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, - 0x10, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, - 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x18, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, - 0x4e, 0x5f, 0x46, 0x52, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, - 0x11, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, - 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x39, 0x10, 0x12, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, - 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, - 0x35, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, - 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x14, 0x12, 0x17, 0x0a, 0x13, - 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, - 0x5f, 0x30, 0x35, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, - 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, 0x22, - 0xb1, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, - 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, - 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x03, 0x12, 0x0e, 0x0a, - 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x04, 0x12, 0x11, 0x0a, - 0x0d, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, - 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, - 
0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x44, - 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, - 0x0f, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x46, 0x52, 0x41, 0x43, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x52, - 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, - 0x5f, 0x39, 0x39, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, - 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x35, 0x10, 0x0a, 0x12, - 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, - 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, - 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x30, - 0x35, 0x10, 0x0c, 0x2a, 0x9e, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, - 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, - 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, - 0x5f, 0x47, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, - 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, - 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, - 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x11, - 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x51, 0x10, - 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, - 0x4e, 0x45, 0x10, 0x06, 0x2a, 0x61, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, - 0x69, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, - 0x49, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, - 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, - 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55, - 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xc3, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, - 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x4d, 0x6f, 0x6e, 0x69, 
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_common_proto_rawDescOnce sync.Once - file_google_monitoring_v3_common_proto_rawDescData = file_google_monitoring_v3_common_proto_rawDesc -) - -func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_common_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_common_proto_rawDescData) - }) - return file_google_monitoring_v3_common_proto_rawDescData -} - -var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_monitoring_v3_common_proto_goTypes = []interface{}{ - (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType - (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier - (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner - (Aggregation_Reducer)(0), // 3: google.monitoring.v3.Aggregation.Reducer - (*TypedValue)(nil), // 4: google.monitoring.v3.TypedValue - (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval - (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation - (*distribution.Distribution)(nil), // 7: google.api.Distribution - (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 9: google.protobuf.Duration -} -var file_google_monitoring_v3_common_proto_depIdxs = []int32{ - 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution - 8, // 1: google.monitoring.v3.TimeInterval.end_time:type_name -> google.protobuf.Timestamp - 8, // 2: google.monitoring.v3.TimeInterval.start_time:type_name -> google.protobuf.Timestamp - 9, // 3: google.monitoring.v3.Aggregation.alignment_period:type_name -> google.protobuf.Duration - 2, // 4: google.monitoring.v3.Aggregation.per_series_aligner:type_name -> google.monitoring.v3.Aggregation.Aligner - 3, // 5: google.monitoring.v3.Aggregation.cross_series_reducer:type_name -> google.monitoring.v3.Aggregation.Reducer - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_common_proto_init() } -func file_google_monitoring_v3_common_proto_init() { - if File_google_monitoring_v3_common_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TypedValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeInterval); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*Aggregation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*TypedValue_BoolValue)(nil), - (*TypedValue_Int64Value)(nil), - (*TypedValue_DoubleValue)(nil), - (*TypedValue_StringValue)(nil), - (*TypedValue_DistributionValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_common_proto_rawDesc, - NumEnums: 4, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_common_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_common_proto_depIdxs, - EnumInfos: file_google_monitoring_v3_common_proto_enumTypes, - MessageInfos: file_google_monitoring_v3_common_proto_msgTypes, - }.Build() - File_google_monitoring_v3_common_proto = out.File - file_google_monitoring_v3_common_proto_rawDesc = nil - file_google_monitoring_v3_common_proto_goTypes = nil - file_google_monitoring_v3_common_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go deleted file mode 100644 index 38e5e5b4e7..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/dropped_labels.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// A set of (label, value) pairs which were dropped during aggregation, attached -// to google.api.Distribution.Exemplars in google.api.Distribution values during -// aggregation. -// -// These values are used in combination with the label values that remain on the -// aggregated Distribution timeseries to construct the full label set for the -// exemplar values. 
The resulting full label set may be used to identify the -// specific task/job/instance (for example) which may be contributing to a -// long-tail, while allowing the storage savings of only storing aggregated -// distribution values for a large group. -// -// Note that there are no guarantees on ordering of the labels from -// exemplar-to-exemplar and from distribution-to-distribution in the same -// stream, and there may be duplicates. It is up to clients to resolve any -// ambiguities. -type DroppedLabels struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map from label to its value, for all labels dropped in any aggregation. - Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *DroppedLabels) Reset() { - *x = DroppedLabels{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DroppedLabels) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DroppedLabels) ProtoMessage() {} - -func (x *DroppedLabels) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead. -func (*DroppedLabels) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0} -} - -func (x *DroppedLabels) GetLabel() map[string]string { - if x != nil { - return x.Label - } - return nil -} - -var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x42, 0xca, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 
0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, - 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once - file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc -) - -func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData) - }) - return file_google_monitoring_v3_dropped_labels_proto_rawDescData -} - -var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_monitoring_v3_dropped_labels_proto_goTypes = []interface{}{ - (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels - nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry -} -var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{ - 1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_dropped_labels_proto_init() } -func file_google_monitoring_v3_dropped_labels_proto_init() { - if File_google_monitoring_v3_dropped_labels_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DroppedLabels); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes, - }.Build() - File_google_monitoring_v3_dropped_labels_proto = out.File - 
file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil - file_google_monitoring_v3_dropped_labels_proto_goTypes = nil - file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go deleted file mode 100644 index 3e9fabc0d1..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/group.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The description of a dynamic collection of monitored resources. Each group -// has a filter that is matched against monitored resources and their associated -// metadata. If a group's filter matches an available monitored resource, then -// that resource is a member of that group. Groups can contain any number of -// monitored resources, and each monitored resource can be a member of any -// number of groups. -// -// Groups can be nested in parent-child hierarchies. The `parentName` field -// identifies an optional parent for each group. If a group has a parent, then -// the only monitored resources available to be matched by the group's filter -// are the resources contained in the parent group. In other words, a group -// contains the monitored resources that match its filter and the filters of all -// the group's ancestors. A group without a parent can contain any monitored -// resource. -// -// For example, consider an infrastructure running a set of instances with two -// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, -// `environment="production"`. A child of that parent group has a filter, -// `role="transcoder"`. The parent group contains all instances in the -// production environment, regardless of their roles. The child group contains -// instances that have the transcoder role *and* are in the production -// environment. 
-// -// The monitored resources contained in a group can change at any moment, -// depending on what resources exist and what filters are associated with the -// group and its ancestors. -type Group struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Output only. The name of this group. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // - // When creating a group, this field is ignored and a new name is created - // consisting of the project specified in the call to `CreateGroup` - // and a unique `[GROUP_ID]` that is generated automatically. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A user-assigned name for this group, used only for display purposes. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // The name of the group's parent, if it has one. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // - // For groups with no parent, `parent_name` is the empty string, `""`. - ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` - // The filter used to determine which monitored resources belong to this - // group. - Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` - // If true, the members of this group are considered to be a cluster. - // The system can perform additional analysis on groups that are clusters. - IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` -} - -func (x *Group) Reset() { - *x = Group{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Group) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Group) ProtoMessage() {} - -func (x *Group) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Group.ProtoReflect.Descriptor instead. 
-func (*Group) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0} -} - -func (x *Group) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Group) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *Group) GetParentName() string { - if x != nil { - return x.ParentName - } - return "" -} - -func (x *Group) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *Group) GetIsCluster() bool { - if x != nil { - return x.IsCluster - } - return false -} - -var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_group_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea, - 0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, - 0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, - 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc2, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, - 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_group_proto_rawDescOnce sync.Once - file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc -) - -func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData) - }) - return file_google_monitoring_v3_group_proto_rawDescData -} - -var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_group_proto_goTypes = []interface{}{ - (*Group)(nil), // 0: google.monitoring.v3.Group -} -var file_google_monitoring_v3_group_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_group_proto_init() } -func file_google_monitoring_v3_group_proto_init() { - if File_google_monitoring_v3_group_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Group); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_group_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_group_proto_msgTypes, - }.Build() - File_google_monitoring_v3_group_proto = out.File - file_google_monitoring_v3_group_proto_rawDesc = nil - file_google_monitoring_v3_group_proto_goTypes = nil - file_google_monitoring_v3_group_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go deleted file mode 100644 index cc81ca4ed6..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go +++ /dev/null @@ -1,1318 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/group_service.proto - -package monitoring - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The `ListGroup` request. -type ListGroupsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project whose groups are to be listed. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` - // An optional filter consisting of a single group name. The filters limit - // the groups returned based on their parent-child relationship with the - // specified group. If no filter is specified, all groups are returned. - // - // Types that are assignable to Filter: - // *ListGroupsRequest_ChildrenOfGroup - // *ListGroupsRequest_AncestorsOfGroup - // *ListGroupsRequest_DescendantsOfGroup - Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` - // A positive number that is the maximum number of results to return. - PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `next_page_token` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. 
- PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListGroupsRequest) Reset() { - *x = ListGroupsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListGroupsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListGroupsRequest) ProtoMessage() {} - -func (x *ListGroupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListGroupsRequest.ProtoReflect.Descriptor instead. -func (*ListGroupsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ListGroupsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (x *ListGroupsRequest) GetChildrenOfGroup() string { - if x, ok := x.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { - return x.ChildrenOfGroup - } - return "" -} - -func (x *ListGroupsRequest) GetAncestorsOfGroup() string { - if x, ok := x.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { - return x.AncestorsOfGroup - } - return "" -} - -func (x *ListGroupsRequest) GetDescendantsOfGroup() string { - if x, ok := x.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { - return x.DescendantsOfGroup - } - return "" -} - -func (x *ListGroupsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListGroupsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -type isListGroupsRequest_Filter interface { - isListGroupsRequest_Filter() -} - -type ListGroupsRequest_ChildrenOfGroup struct { - // A group name. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // - // Returns groups whose `parent_name` field contains the group - // name. If no groups have this parent, the results are empty. - ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` -} - -type ListGroupsRequest_AncestorsOfGroup struct { - // A group name. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // - // Returns groups that are ancestors of the specified group. - // The groups are returned in order, starting with the immediate parent and - // ending with the most distant ancestor. If the specified group has no - // immediate parent, the results are empty. - AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` -} - -type ListGroupsRequest_DescendantsOfGroup struct { - // A group name. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // - // Returns the descendants of the specified group. This is a superset of - // the results returned by the `children_of_group` filter, and includes - // children-of-children, and so forth. 
- DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` -} - -func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} - -func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} - -func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} - -// The `ListGroups` response. -type ListGroupsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The groups that match the specified filters. - Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListGroupsResponse) Reset() { - *x = ListGroupsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListGroupsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListGroupsResponse) ProtoMessage() {} - -func (x *ListGroupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListGroupsResponse.ProtoReflect.Descriptor instead. -func (*ListGroupsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListGroupsResponse) GetGroup() []*Group { - if x != nil { - return x.Group - } - return nil -} - -func (x *ListGroupsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `GetGroup` request. -type GetGroupRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The group to retrieve. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetGroupRequest) Reset() { - *x = GetGroupRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetGroupRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetGroupRequest) ProtoMessage() {} - -func (x *GetGroupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetGroupRequest.ProtoReflect.Descriptor instead. 
-func (*GetGroupRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{2} -} - -func (x *GetGroupRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `CreateGroup` request. -type CreateGroupRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project in which to create the group. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // Required. A group definition. It is an error to define the `name` field because - // the system assigns the name. - Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` - // If true, validate this request but do not create the group. - ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` -} - -func (x *CreateGroupRequest) Reset() { - *x = CreateGroupRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateGroupRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateGroupRequest) ProtoMessage() {} - -func (x *CreateGroupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateGroupRequest.ProtoReflect.Descriptor instead. -func (*CreateGroupRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{3} -} - -func (x *CreateGroupRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateGroupRequest) GetGroup() *Group { - if x != nil { - return x.Group - } - return nil -} - -func (x *CreateGroupRequest) GetValidateOnly() bool { - if x != nil { - return x.ValidateOnly - } - return false -} - -// The `UpdateGroup` request. -type UpdateGroupRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The new definition of the group. All fields of the existing group, - // excepting `name`, are replaced with the corresponding fields of this group. - Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` - // If true, validate this request but do not update the existing group. 
- ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` -} - -func (x *UpdateGroupRequest) Reset() { - *x = UpdateGroupRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateGroupRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateGroupRequest) ProtoMessage() {} - -func (x *UpdateGroupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateGroupRequest.ProtoReflect.Descriptor instead. -func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{4} -} - -func (x *UpdateGroupRequest) GetGroup() *Group { - if x != nil { - return x.Group - } - return nil -} - -func (x *UpdateGroupRequest) GetValidateOnly() bool { - if x != nil { - return x.ValidateOnly - } - return false -} - -// The `DeleteGroup` request. The default behavior is to be able to delete a -// single group without any descendants. -type DeleteGroupRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The group to delete. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // If this field is true, then the request means to delete a group with all - // its descendants. Otherwise, the request means to delete a group only when - // it has no descendants. The default value is false. - Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` -} - -func (x *DeleteGroupRequest) Reset() { - *x = DeleteGroupRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteGroupRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteGroupRequest) ProtoMessage() {} - -func (x *DeleteGroupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteGroupRequest.ProtoReflect.Descriptor instead. -func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{5} -} - -func (x *DeleteGroupRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *DeleteGroupRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false -} - -// The `ListGroupMembers` request. -type ListGroupMembersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The group whose members are listed. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` - // A positive number that is the maximum number of results to return. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `next_page_token` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // An optional [list - // filter](https://cloud.google.com/monitoring/api/learn_more#filtering) - // describing the members to be returned. The filter may reference the type, - // labels, and metadata of monitored resources that comprise the group. For - // example, to return only resources representing Compute Engine VM instances, - // use this filter: - // - // `resource.type = "gce_instance"` - Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` - // An optional time interval for which results should be returned. Only - // members that were part of the group during the specified interval are - // included in the response. If no interval is provided then the group - // membership over the last minute is returned. - Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` -} - -func (x *ListGroupMembersRequest) Reset() { - *x = ListGroupMembersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListGroupMembersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListGroupMembersRequest) ProtoMessage() {} - -func (x *ListGroupMembersRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListGroupMembersRequest.ProtoReflect.Descriptor instead. -func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{6} -} - -func (x *ListGroupMembersRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListGroupMembersRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListGroupMembersRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListGroupMembersRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListGroupMembersRequest) GetInterval() *TimeInterval { - if x != nil { - return x.Interval - } - return nil -} - -// The `ListGroupMembers` response. -type ListGroupMembersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A set of monitored resources in the group. - Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` - // If there are more results than have been returned, then this field is - // set to a non-empty value. 
To see the additional results, use that value as - // `page_token` in the next call to this method. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - // The total number of elements matching this request. - TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` -} - -func (x *ListGroupMembersResponse) Reset() { - *x = ListGroupMembersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListGroupMembersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListGroupMembersResponse) ProtoMessage() {} - -func (x *ListGroupMembersResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListGroupMembersResponse.ProtoReflect.Descriptor instead. -func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{7} -} - -func (x *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { - if x != nil { - return x.Members - } - return nil -} - -func (x *ListGroupMembersResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -func (x *ListGroupMembersResponse) GetTotalSize() int32 { - if x != nil { - return x.TotalSize - } - return 0 -} - -var File_google_monitoring_v3_group_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_group_service_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, - 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x03, 0x0a, 0x11, 0x4c, - 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, - 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, - 0x52, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x12, 0x54, 0x0a, 0x12, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x6f, - 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, - 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x10, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, - 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x58, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x63, 0x65, - 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x64, - 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x08, 0x0a, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, - 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4e, 0x0a, 0x0f, 
0x47, 0x65, 0x74, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x71, 0x0a, 0x12, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x6f, 0x0a, 0x12, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xea, 0x01, - 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, - 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 
0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, - 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x4c, - 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x32, 0x98, 0x08, 0x0a, 0x0c, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, - 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7d, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, - 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, - 0x41, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x38, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x3a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x3b, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x1a, 0x24, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x05, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0xda, 0x41, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x7e, 0x0a, 0x0b, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2d, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x10, - 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x35, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 
0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, - 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, - 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, - 0x61, 0x64, 0x42, 0xc9, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, - 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, - 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, - 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_group_service_proto_rawDescOnce sync.Once - file_google_monitoring_v3_group_service_proto_rawDescData = file_google_monitoring_v3_group_service_proto_rawDesc -) - -func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_group_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_group_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_service_proto_rawDescData) - }) - return file_google_monitoring_v3_group_service_proto_rawDescData -} - -var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_monitoring_v3_group_service_proto_goTypes = []interface{}{ - (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest - (*ListGroupsResponse)(nil), // 1: google.monitoring.v3.ListGroupsResponse - (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest - (*CreateGroupRequest)(nil), // 3: google.monitoring.v3.CreateGroupRequest - (*UpdateGroupRequest)(nil), // 4: google.monitoring.v3.UpdateGroupRequest 
- (*DeleteGroupRequest)(nil), // 5: google.monitoring.v3.DeleteGroupRequest - (*ListGroupMembersRequest)(nil), // 6: google.monitoring.v3.ListGroupMembersRequest - (*ListGroupMembersResponse)(nil), // 7: google.monitoring.v3.ListGroupMembersResponse - (*Group)(nil), // 8: google.monitoring.v3.Group - (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval - (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource - (*emptypb.Empty)(nil), // 11: google.protobuf.Empty -} -var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{ - 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group - 8, // 1: google.monitoring.v3.CreateGroupRequest.group:type_name -> google.monitoring.v3.Group - 8, // 2: google.monitoring.v3.UpdateGroupRequest.group:type_name -> google.monitoring.v3.Group - 9, // 3: google.monitoring.v3.ListGroupMembersRequest.interval:type_name -> google.monitoring.v3.TimeInterval - 10, // 4: google.monitoring.v3.ListGroupMembersResponse.members:type_name -> google.api.MonitoredResource - 0, // 5: google.monitoring.v3.GroupService.ListGroups:input_type -> google.monitoring.v3.ListGroupsRequest - 2, // 6: google.monitoring.v3.GroupService.GetGroup:input_type -> google.monitoring.v3.GetGroupRequest - 3, // 7: google.monitoring.v3.GroupService.CreateGroup:input_type -> google.monitoring.v3.CreateGroupRequest - 4, // 8: google.monitoring.v3.GroupService.UpdateGroup:input_type -> google.monitoring.v3.UpdateGroupRequest - 5, // 9: google.monitoring.v3.GroupService.DeleteGroup:input_type -> google.monitoring.v3.DeleteGroupRequest - 6, // 10: google.monitoring.v3.GroupService.ListGroupMembers:input_type -> google.monitoring.v3.ListGroupMembersRequest - 1, // 11: google.monitoring.v3.GroupService.ListGroups:output_type -> google.monitoring.v3.ListGroupsResponse - 8, // 12: google.monitoring.v3.GroupService.GetGroup:output_type -> google.monitoring.v3.Group - 8, // 13: google.monitoring.v3.GroupService.CreateGroup:output_type -> google.monitoring.v3.Group - 8, // 14: google.monitoring.v3.GroupService.UpdateGroup:output_type -> google.monitoring.v3.Group - 11, // 15: google.monitoring.v3.GroupService.DeleteGroup:output_type -> google.protobuf.Empty - 7, // 16: google.monitoring.v3.GroupService.ListGroupMembers:output_type -> google.monitoring.v3.ListGroupMembersResponse - 11, // [11:17] is the sub-list for method output_type - 5, // [5:11] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_group_service_proto_init() } -func file_google_monitoring_v3_group_service_proto_init() { - if File_google_monitoring_v3_group_service_proto != nil { - return - } - file_google_monitoring_v3_common_proto_init() - file_google_monitoring_v3_group_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListGroupsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListGroupsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGroupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateGroupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateGroupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteGroupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListGroupMembersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListGroupMembersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ListGroupsRequest_ChildrenOfGroup)(nil), - (*ListGroupsRequest_AncestorsOfGroup)(nil), - (*ListGroupsRequest_DescendantsOfGroup)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_group_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_monitoring_v3_group_service_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_group_service_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_group_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_group_service_proto = out.File - file_google_monitoring_v3_group_service_proto_rawDesc = nil - file_google_monitoring_v3_group_service_proto_goTypes = nil - file_google_monitoring_v3_group_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// GroupServiceClient is the client API for GroupService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GroupServiceClient interface { - // Lists the existing groups. - ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) - // Gets a single group. - GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) - // Creates a new group. 
- CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) - // Updates an existing group. - // You can change any group attributes except `name`. - UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) - // Deletes an existing group. - DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Lists the monitored resources that are members of a group. - ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) -} - -type groupServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient { - return &groupServiceClient{cc} -} - -func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { - out := new(ListGroupsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { - out := new(Group) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { - out := new(Group) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { - out := new(Group) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { - out := new(ListGroupMembersResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// GroupServiceServer is the server API for GroupService service. -type GroupServiceServer interface { - // Lists the existing groups. - ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) - // Gets a single group. - GetGroup(context.Context, *GetGroupRequest) (*Group, error) - // Creates a new group. - CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) - // Updates an existing group. - // You can change any group attributes except `name`. - UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) - // Deletes an existing group. - DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) - // Lists the monitored resources that are members of a group. 
- ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) -} - -// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations. -type UnimplementedGroupServiceServer struct { -} - -func (*UnimplementedGroupServiceServer) ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented") -} -func (*UnimplementedGroupServiceServer) GetGroup(context.Context, *GetGroupRequest) (*Group, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented") -} -func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented") -} -func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented") -} -func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented") -} -func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented") -} - -func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { - s.RegisterService(&_GroupService_serviceDesc, srv) -} - -func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListGroupsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupServiceServer).ListGroups(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.GroupService/ListGroups", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupServiceServer).GetGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.GroupService/GetGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupServiceServer).CreateGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupServiceServer).CreateGroup(ctx, 
req.(*CreateGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupServiceServer).UpdateGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupServiceServer).DeleteGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListGroupMembersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupServiceServer).ListGroupMembers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _GroupService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.monitoring.v3.GroupService", - HandlerType: (*GroupServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListGroups", - Handler: _GroupService_ListGroups_Handler, - }, - { - MethodName: "GetGroup", - Handler: _GroupService_GetGroup_Handler, - }, - { - MethodName: "CreateGroup", - Handler: _GroupService_CreateGroup_Handler, - }, - { - MethodName: "UpdateGroup", - Handler: _GroupService_UpdateGroup_Handler, - }, - { - MethodName: "DeleteGroup", - Handler: _GroupService_DeleteGroup_Handler, - }, - { - MethodName: "ListGroupMembers", - Handler: _GroupService_ListGroupMembers_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/monitoring/v3/group_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go deleted file mode 100644 index ad976bbfe9..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go +++ /dev/null @@ -1,1177 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/metric.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/distribution" - label "google.golang.org/genproto/googleapis/api/label" - metric "google.golang.org/genproto/googleapis/api/metric" - monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - _ "google.golang.org/protobuf/types/known/durationpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// A single data point in a time series. -type Point struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The time interval to which the data point applies. For `GAUGE` metrics, - // the start time is optional, but if it is supplied, it must equal the - // end time. For `DELTA` metrics, the start - // and end time should specify a non-zero interval, with subsequent points - // specifying contiguous and non-overlapping intervals. For `CUMULATIVE` - // metrics, the start and end time should specify a non-zero interval, with - // subsequent points specifying the same start time and increasing end times, - // until an event resets the cumulative value to zero and sets a new start - // time for the following points. - Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` - // The value of the data point. - Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Point) Reset() { - *x = Point{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Point) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Point) ProtoMessage() {} - -func (x *Point) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Point.ProtoReflect.Descriptor instead. 
-func (*Point) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{0} -} - -func (x *Point) GetInterval() *TimeInterval { - if x != nil { - return x.Interval - } - return nil -} - -func (x *Point) GetValue() *TypedValue { - if x != nil { - return x.Value - } - return nil -} - -// A collection of data points that describes the time-varying values -// of a metric. A time series is identified by a combination of a -// fully-specified monitored resource and a fully-specified metric. -// This type is used for both listing and creating time series. -type TimeSeries struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The associated metric. A fully-specified metric used to identify the time - // series. - Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` - // The associated monitored resource. Custom metrics can use only certain - // monitored resource types in their time series data. - Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` - // Output only. The associated monitored resource metadata. When reading a - // a timeseries, this field will include metadata labels that are explicitly - // named in the reduction. When creating a timeseries, this field is ignored. - Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The metric kind of the time series. When listing time series, this metric - // kind might be different from the metric kind of the associated metric if - // this time series is an alignment or reduction of other time series. - // - // When creating a time series, this field is optional. If present, it must be - // the same as the metric kind of the associated metric. If the associated - // metric's descriptor must be auto-created, then this field specifies the - // metric kind of the new descriptor and must be either `GAUGE` (the default) - // or `CUMULATIVE`. - MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` - // The value type of the time series. When listing time series, this value - // type might be different from the value type of the associated metric if - // this time series is an alignment or reduction of other time series. - // - // When creating a time series, this field is optional. If present, it must be - // the same as the type of the data in the `points` field. - ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` - // The data points of this time series. When listing time series, points are - // returned in reverse time order. - // - // When creating a time series, this field must contain exactly one point and - // the point's type must be the same as the value type of the associated - // metric. If the associated metric's descriptor must be auto-created, then - // the value type of the descriptor is determined by the point's type, which - // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
- Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` -} - -func (x *TimeSeries) Reset() { - *x = TimeSeries{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeries) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeries) ProtoMessage() {} - -func (x *TimeSeries) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead. -func (*TimeSeries) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{1} -} - -func (x *TimeSeries) GetMetric() *metric.Metric { - if x != nil { - return x.Metric - } - return nil -} - -func (x *TimeSeries) GetResource() *monitoredres.MonitoredResource { - if x != nil { - return x.Resource - } - return nil -} - -func (x *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { - if x != nil { - return x.MetricKind - } - return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED -} - -func (x *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { - if x != nil { - return x.ValueType - } - return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED -} - -func (x *TimeSeries) GetPoints() []*Point { - if x != nil { - return x.Points - } - return nil -} - -// A descriptor for the labels and points in a timeseries. -type TimeSeriesDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Descriptors for the labels. - LabelDescriptors []*label.LabelDescriptor `protobuf:"bytes,1,rep,name=label_descriptors,json=labelDescriptors,proto3" json:"label_descriptors,omitempty"` - // Descriptors for the point data value columns. - PointDescriptors []*TimeSeriesDescriptor_ValueDescriptor `protobuf:"bytes,5,rep,name=point_descriptors,json=pointDescriptors,proto3" json:"point_descriptors,omitempty"` -} - -func (x *TimeSeriesDescriptor) Reset() { - *x = TimeSeriesDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesDescriptor) ProtoMessage() {} - -func (x *TimeSeriesDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesDescriptor.ProtoReflect.Descriptor instead. 
-func (*TimeSeriesDescriptor) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2} -} - -func (x *TimeSeriesDescriptor) GetLabelDescriptors() []*label.LabelDescriptor { - if x != nil { - return x.LabelDescriptors - } - return nil -} - -func (x *TimeSeriesDescriptor) GetPointDescriptors() []*TimeSeriesDescriptor_ValueDescriptor { - if x != nil { - return x.PointDescriptors - } - return nil -} - -// Represents the values of a time series associated with a -// TimeSeriesDescriptor. -type TimeSeriesData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The values of the labels in the time series identifier, given in the same - // order as the `label_descriptors` field of the TimeSeriesDescriptor - // associated with this object. Each value must have a value of the type - // given in the corresponding entry of `label_descriptors`. - LabelValues []*LabelValue `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` - // The points in the time series. - PointData []*TimeSeriesData_PointData `protobuf:"bytes,2,rep,name=point_data,json=pointData,proto3" json:"point_data,omitempty"` -} - -func (x *TimeSeriesData) Reset() { - *x = TimeSeriesData{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesData) ProtoMessage() {} - -func (x *TimeSeriesData) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesData.ProtoReflect.Descriptor instead. -func (*TimeSeriesData) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3} -} - -func (x *TimeSeriesData) GetLabelValues() []*LabelValue { - if x != nil { - return x.LabelValues - } - return nil -} - -func (x *TimeSeriesData) GetPointData() []*TimeSeriesData_PointData { - if x != nil { - return x.PointData - } - return nil -} - -// A label value. -type LabelValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The label value can be a bool, int64, or string. 
- // - // Types that are assignable to Value: - // *LabelValue_BoolValue - // *LabelValue_Int64Value - // *LabelValue_StringValue - Value isLabelValue_Value `protobuf_oneof:"value"` -} - -func (x *LabelValue) Reset() { - *x = LabelValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelValue) ProtoMessage() {} - -func (x *LabelValue) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead. -func (*LabelValue) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{4} -} - -func (m *LabelValue) GetValue() isLabelValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *LabelValue) GetBoolValue() bool { - if x, ok := x.GetValue().(*LabelValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (x *LabelValue) GetInt64Value() int64 { - if x, ok := x.GetValue().(*LabelValue_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (x *LabelValue) GetStringValue() string { - if x, ok := x.GetValue().(*LabelValue_StringValue); ok { - return x.StringValue - } - return "" -} - -type isLabelValue_Value interface { - isLabelValue_Value() -} - -type LabelValue_BoolValue struct { - // A bool label value. - BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type LabelValue_Int64Value struct { - // An int64 label value. - Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type LabelValue_StringValue struct { - // A string label value. - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -func (*LabelValue_BoolValue) isLabelValue_Value() {} - -func (*LabelValue_Int64Value) isLabelValue_Value() {} - -func (*LabelValue_StringValue) isLabelValue_Value() {} - -// An error associated with a query in the time series query language format. -type QueryError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The location of the time series query language text that this error applies - // to. - Locator *TextLocator `protobuf:"bytes,1,opt,name=locator,proto3" json:"locator,omitempty"` - // The error message. 
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *QueryError) Reset() { - *x = QueryError{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryError) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryError) ProtoMessage() {} - -func (x *QueryError) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryError.ProtoReflect.Descriptor instead. -func (*QueryError) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{5} -} - -func (x *QueryError) GetLocator() *TextLocator { - if x != nil { - return x.Locator - } - return nil -} - -func (x *QueryError) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -// A locator for text. Indicates a particular part of the text of a request or -// of an object referenced in the request. -// -// For example, suppose the request field `text` contains: -// -// text: "The quick brown fox jumps over the lazy dog." -// -// Then the locator: -// -// source: "text" -// start_position { -// line: 1 -// column: 17 -// } -// end_position { -// line: 1 -// column: 19 -// } -// -// refers to the part of the text: "fox". -type TextLocator struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The source of the text. The source may be a field in the request, in which - // case its format is the format of the - // google.rpc.BadRequest.FieldViolation.field field in - // https://cloud.google.com/apis/design/errors#error_details. It may also be - // be a source other than the request field (e.g. a macro definition - // referenced in the text of the query), in which case this is the name of - // the source (e.g. the macro name). - Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - // The position of the first byte within the text. - StartPosition *TextLocator_Position `protobuf:"bytes,2,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"` - // The position of the last byte within the text. - EndPosition *TextLocator_Position `protobuf:"bytes,3,opt,name=end_position,json=endPosition,proto3" json:"end_position,omitempty"` - // If `source`, `start_position`, and `end_position` describe a call on - // some object (e.g. a macro in the time series query language text) and a - // location is to be designated in that object's text, `nested_locator` - // identifies the location within that object. - NestedLocator *TextLocator `protobuf:"bytes,4,opt,name=nested_locator,json=nestedLocator,proto3" json:"nested_locator,omitempty"` - // When `nested_locator` is set, this field gives the reason for the nesting. - // Usually, the reason is a macro invocation. In that case, the macro name - // (including the leading '@') signals the location of the macro call - // in the text and a macro argument name (including the leading '$') signals - // the location of the macro argument inside the macro body that got - // substituted away. 
- NestingReason string `protobuf:"bytes,5,opt,name=nesting_reason,json=nestingReason,proto3" json:"nesting_reason,omitempty"` -} - -func (x *TextLocator) Reset() { - *x = TextLocator{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TextLocator) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TextLocator) ProtoMessage() {} - -func (x *TextLocator) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TextLocator.ProtoReflect.Descriptor instead. -func (*TextLocator) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6} -} - -func (x *TextLocator) GetSource() string { - if x != nil { - return x.Source - } - return "" -} - -func (x *TextLocator) GetStartPosition() *TextLocator_Position { - if x != nil { - return x.StartPosition - } - return nil -} - -func (x *TextLocator) GetEndPosition() *TextLocator_Position { - if x != nil { - return x.EndPosition - } - return nil -} - -func (x *TextLocator) GetNestedLocator() *TextLocator { - if x != nil { - return x.NestedLocator - } - return nil -} - -func (x *TextLocator) GetNestingReason() string { - if x != nil { - return x.NestingReason - } - return "" -} - -// A descriptor for the value columns in a data point. -type TimeSeriesDescriptor_ValueDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The value key. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // The value type. - ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` - // The value stream kind. - MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` -} - -func (x *TimeSeriesDescriptor_ValueDescriptor) Reset() { - *x = TimeSeriesDescriptor_ValueDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesDescriptor_ValueDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesDescriptor_ValueDescriptor) ProtoMessage() {} - -func (x *TimeSeriesDescriptor_ValueDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesDescriptor_ValueDescriptor.ProtoReflect.Descriptor instead. 
-func (*TimeSeriesDescriptor_ValueDescriptor) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *TimeSeriesDescriptor_ValueDescriptor) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *TimeSeriesDescriptor_ValueDescriptor) GetValueType() metric.MetricDescriptor_ValueType { - if x != nil { - return x.ValueType - } - return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED -} - -func (x *TimeSeriesDescriptor_ValueDescriptor) GetMetricKind() metric.MetricDescriptor_MetricKind { - if x != nil { - return x.MetricKind - } - return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED -} - -// A point's value columns and time interval. Each point has one or more -// point values corresponding to the entries in `point_descriptors` field in -// the TimeSeriesDescriptor associated with this object. -type TimeSeriesData_PointData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The values that make up the point. - Values []*TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - // The time interval associated with the point. - TimeInterval *TimeInterval `protobuf:"bytes,2,opt,name=time_interval,json=timeInterval,proto3" json:"time_interval,omitempty"` -} - -func (x *TimeSeriesData_PointData) Reset() { - *x = TimeSeriesData_PointData{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesData_PointData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesData_PointData) ProtoMessage() {} - -func (x *TimeSeriesData_PointData) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesData_PointData.ProtoReflect.Descriptor instead. -func (*TimeSeriesData_PointData) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *TimeSeriesData_PointData) GetValues() []*TypedValue { - if x != nil { - return x.Values - } - return nil -} - -func (x *TimeSeriesData_PointData) GetTimeInterval() *TimeInterval { - if x != nil { - return x.TimeInterval - } - return nil -} - -// The position of a byte within the text. -type TextLocator_Position struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The line, starting with 1, where the byte is positioned. - Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` - // The column within the line, starting with 1, where the byte is - // positioned. This is a byte index even though the text is UTF-8. 
- Column int32 `protobuf:"varint,2,opt,name=column,proto3" json:"column,omitempty"` -} - -func (x *TextLocator_Position) Reset() { - *x = TextLocator_Position{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TextLocator_Position) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TextLocator_Position) ProtoMessage() {} - -func (x *TextLocator_Position) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TextLocator_Position.ProtoReflect.Descriptor instead. -func (*TextLocator_Position) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *TextLocator_Position) GetLine() int32 { - if x != nil { - return x.Line - } - return 0 -} - -func (x *TextLocator_Position) GetColumn() int32 { - if x != nil { - return x.Column - } - return 0 -} - -var File_google_monitoring_v3_metric_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_metric_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x7f, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 
0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0xfc, 0x02, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x39, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x73, 0x22, 0x80, 0x03, 0x0a, 0x14, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x48, 0x0a, 0x11, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x44, 0x65, 0x73, 0x63, 
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0xb4, 0x01, - 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x4b, 0x69, 0x6e, 0x64, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x43, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0a, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x52, 0x09, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x8e, 0x01, 0x0a, 0x09, - 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x0c, - 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x7e, 0x0a, 0x0a, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, - 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, - 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0a, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x07, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0xf0, 0x02, 0x0a, 0x0b, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0c, - 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, - 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0e, 0x6e, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x36, 0x0a, 0x08, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0xc3, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x50, 0x01, - 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_metric_proto_rawDescOnce sync.Once - file_google_monitoring_v3_metric_proto_rawDescData = file_google_monitoring_v3_metric_proto_rawDesc -) - -func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_metric_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_proto_rawDescData) - }) - return file_google_monitoring_v3_metric_proto_rawDescData -} - -var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_google_monitoring_v3_metric_proto_goTypes = []interface{}{ - (*Point)(nil), // 0: google.monitoring.v3.Point - (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries - (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor - (*TimeSeriesData)(nil), // 3: google.monitoring.v3.TimeSeriesData - (*LabelValue)(nil), // 4: google.monitoring.v3.LabelValue - (*QueryError)(nil), // 5: google.monitoring.v3.QueryError - (*TextLocator)(nil), // 6: google.monitoring.v3.TextLocator - (*TimeSeriesDescriptor_ValueDescriptor)(nil), // 7: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor - (*TimeSeriesData_PointData)(nil), // 8: google.monitoring.v3.TimeSeriesData.PointData - (*TextLocator_Position)(nil), // 9: google.monitoring.v3.TextLocator.Position - (*TimeInterval)(nil), // 10: google.monitoring.v3.TimeInterval - (*TypedValue)(nil), // 11: google.monitoring.v3.TypedValue - (*metric.Metric)(nil), // 12: google.api.Metric - (*monitoredres.MonitoredResource)(nil), // 13: google.api.MonitoredResource - (*monitoredres.MonitoredResourceMetadata)(nil), // 14: google.api.MonitoredResourceMetadata - (metric.MetricDescriptor_MetricKind)(0), // 15: google.api.MetricDescriptor.MetricKind - (metric.MetricDescriptor_ValueType)(0), // 16: google.api.MetricDescriptor.ValueType - (*label.LabelDescriptor)(nil), // 17: google.api.LabelDescriptor -} -var file_google_monitoring_v3_metric_proto_depIdxs = []int32{ - 10, // 0: google.monitoring.v3.Point.interval:type_name -> google.monitoring.v3.TimeInterval - 11, // 1: google.monitoring.v3.Point.value:type_name -> google.monitoring.v3.TypedValue - 12, // 2: google.monitoring.v3.TimeSeries.metric:type_name -> google.api.Metric - 13, // 3: google.monitoring.v3.TimeSeries.resource:type_name -> google.api.MonitoredResource - 14, // 4: google.monitoring.v3.TimeSeries.metadata:type_name -> google.api.MonitoredResourceMetadata - 15, // 5: 
google.monitoring.v3.TimeSeries.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind - 16, // 6: google.monitoring.v3.TimeSeries.value_type:type_name -> google.api.MetricDescriptor.ValueType - 0, // 7: google.monitoring.v3.TimeSeries.points:type_name -> google.monitoring.v3.Point - 17, // 8: google.monitoring.v3.TimeSeriesDescriptor.label_descriptors:type_name -> google.api.LabelDescriptor - 7, // 9: google.monitoring.v3.TimeSeriesDescriptor.point_descriptors:type_name -> google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor - 4, // 10: google.monitoring.v3.TimeSeriesData.label_values:type_name -> google.monitoring.v3.LabelValue - 8, // 11: google.monitoring.v3.TimeSeriesData.point_data:type_name -> google.monitoring.v3.TimeSeriesData.PointData - 6, // 12: google.monitoring.v3.QueryError.locator:type_name -> google.monitoring.v3.TextLocator - 9, // 13: google.monitoring.v3.TextLocator.start_position:type_name -> google.monitoring.v3.TextLocator.Position - 9, // 14: google.monitoring.v3.TextLocator.end_position:type_name -> google.monitoring.v3.TextLocator.Position - 6, // 15: google.monitoring.v3.TextLocator.nested_locator:type_name -> google.monitoring.v3.TextLocator - 16, // 16: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType - 15, // 17: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind - 11, // 18: google.monitoring.v3.TimeSeriesData.PointData.values:type_name -> google.monitoring.v3.TypedValue - 10, // 19: google.monitoring.v3.TimeSeriesData.PointData.time_interval:type_name -> google.monitoring.v3.TimeInterval - 20, // [20:20] is the sub-list for method output_type - 20, // [20:20] is the sub-list for method input_type - 20, // [20:20] is the sub-list for extension type_name - 20, // [20:20] is the sub-list for extension extendee - 0, // [0:20] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_metric_proto_init() } -func file_google_monitoring_v3_metric_proto_init() { - if File_google_monitoring_v3_metric_proto != nil { - return - } - file_google_monitoring_v3_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Point); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeSeries); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeSeriesDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeSeriesData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LabelValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TextLocator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeSeriesDescriptor_ValueDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeSeriesData_PointData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TextLocator_Position); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*LabelValue_BoolValue)(nil), - (*LabelValue_Int64Value)(nil), - (*LabelValue_StringValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_metric_proto_rawDesc, - NumEnums: 0, - NumMessages: 10, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_metric_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_metric_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_metric_proto_msgTypes, - }.Build() - File_google_monitoring_v3_metric_proto = out.File - file_google_monitoring_v3_metric_proto_rawDesc = nil - file_google_monitoring_v3_metric_proto_goTypes = nil - file_google_monitoring_v3_metric_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go deleted file mode 100644 index 015d8e2ba8..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go +++ /dev/null @@ -1,2360 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/metric_service.proto - -package monitoring - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - metric "google.golang.org/genproto/googleapis/api/metric" - monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" - status "google.golang.org/genproto/googleapis/rpc/status" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status1 "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - _ "google.golang.org/protobuf/types/known/durationpb" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Controls which fields are returned by `ListTimeSeries`. -type ListTimeSeriesRequest_TimeSeriesView int32 - -const ( - // Returns the identity of the metric(s), the time series, - // and the time series data. - ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 - // Returns the identity of the metric and the time series resource, - // but not the time series data. - ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 -) - -// Enum value maps for ListTimeSeriesRequest_TimeSeriesView. -var ( - ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ - 0: "FULL", - 1: "HEADERS", - } - ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ - "FULL": 0, - "HEADERS": 1, - } -) - -func (x ListTimeSeriesRequest_TimeSeriesView) Enum() *ListTimeSeriesRequest_TimeSeriesView { - p := new(ListTimeSeriesRequest_TimeSeriesView) - *p = x - return p -} - -func (x ListTimeSeriesRequest_TimeSeriesView) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ListTimeSeriesRequest_TimeSeriesView) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_metric_service_proto_enumTypes[0].Descriptor() -} - -func (ListTimeSeriesRequest_TimeSeriesView) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_metric_service_proto_enumTypes[0] -} - -func (x ListTimeSeriesRequest_TimeSeriesView) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ListTimeSeriesRequest_TimeSeriesView.Descriptor instead. -func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8, 0} -} - -// The `ListMonitoredResourceDescriptors` request. -type ListMonitoredResourceDescriptorsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters) - // describing the descriptors to be returned. 
The filter can reference the - // descriptor's type and labels. For example, the following filter returns - // only Google Compute Engine descriptors that have an `id` label: - // - // resource.type = starts_with("gce_") AND resource.label:id - Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // A positive number that is the maximum number of results to return. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListMonitoredResourceDescriptorsRequest) Reset() { - *x = ListMonitoredResourceDescriptorsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMonitoredResourceDescriptorsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} - -func (x *ListMonitoredResourceDescriptorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMonitoredResourceDescriptorsRequest.ProtoReflect.Descriptor instead. -func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ListMonitoredResourceDescriptorsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListMonitoredResourceDescriptorsRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `ListMonitoredResourceDescriptors` response. -type ListMonitoredResourceDescriptorsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The monitored resource descriptors that are available to this project - // and that match `filter`, if present. - ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListMonitoredResourceDescriptorsResponse) Reset() { - *x = ListMonitoredResourceDescriptorsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMonitoredResourceDescriptorsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} - -func (x *ListMonitoredResourceDescriptorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMonitoredResourceDescriptorsResponse.ProtoReflect.Descriptor instead. -func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { - if x != nil { - return x.ResourceDescriptors - } - return nil -} - -func (x *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `GetMonitoredResourceDescriptor` request. -type GetMonitoredResourceDescriptorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The monitored resource descriptor to get. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] - // - // The `[RESOURCE_TYPE]` is a predefined type, such as - // `cloudsql_database`. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetMonitoredResourceDescriptorRequest) Reset() { - *x = GetMonitoredResourceDescriptorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMonitoredResourceDescriptorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} - -func (x *GetMonitoredResourceDescriptorRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMonitoredResourceDescriptorRequest.ProtoReflect.Descriptor instead. -func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2} -} - -func (x *GetMonitoredResourceDescriptorRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `ListMetricDescriptors` request. -type ListMetricDescriptorsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. 
The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // If this field is empty, all custom and - // system-defined metric descriptors are returned. - // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifies which metric descriptors are to be - // returned. For example, the following filter matches all - // [custom metrics](https://cloud.google.com/monitoring/custom-metrics): - // - // metric.type = starts_with("custom.googleapis.com/") - Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // A positive number that is the maximum number of results to return. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListMetricDescriptorsRequest) Reset() { - *x = ListMetricDescriptorsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMetricDescriptorsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMetricDescriptorsRequest) ProtoMessage() {} - -func (x *ListMetricDescriptorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMetricDescriptorsRequest.ProtoReflect.Descriptor instead. -func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{3} -} - -func (x *ListMetricDescriptorsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListMetricDescriptorsRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListMetricDescriptorsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListMetricDescriptorsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `ListMetricDescriptors` response. -type ListMetricDescriptorsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The metric descriptors that are available to the project - // and that match the value of `filter`, if present. - MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListMetricDescriptorsResponse) Reset() { - *x = ListMetricDescriptorsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMetricDescriptorsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMetricDescriptorsResponse) ProtoMessage() {} - -func (x *ListMetricDescriptorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMetricDescriptorsResponse.ProtoReflect.Descriptor instead. -func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{4} -} - -func (x *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor { - if x != nil { - return x.MetricDescriptors - } - return nil -} - -func (x *ListMetricDescriptorsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `GetMetricDescriptor` request. -type GetMetricDescriptorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The metric descriptor on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] - // - // An example value of `[METRIC_ID]` is - // `"compute.googleapis.com/instance/disk/read_bytes_count"`. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetMetricDescriptorRequest) Reset() { - *x = GetMetricDescriptorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMetricDescriptorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMetricDescriptorRequest) ProtoMessage() {} - -func (x *GetMetricDescriptorRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMetricDescriptorRequest.ProtoReflect.Descriptor instead. -func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5} -} - -func (x *GetMetricDescriptorRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `CreateMetricDescriptor` request. -type CreateMetricDescriptorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Required. 
The new [custom metric](https://cloud.google.com/monitoring/custom-metrics) - // descriptor. - MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` -} - -func (x *CreateMetricDescriptorRequest) Reset() { - *x = CreateMetricDescriptorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateMetricDescriptorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateMetricDescriptorRequest) ProtoMessage() {} - -func (x *CreateMetricDescriptorRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateMetricDescriptorRequest.ProtoReflect.Descriptor instead. -func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6} -} - -func (x *CreateMetricDescriptorRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor { - if x != nil { - return x.MetricDescriptor - } - return nil -} - -// The `DeleteMetricDescriptor` request. -type DeleteMetricDescriptorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The metric descriptor on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] - // - // An example of `[METRIC_ID]` is: - // `"custom.googleapis.com/my_test_metric"`. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteMetricDescriptorRequest) Reset() { - *x = DeleteMetricDescriptorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteMetricDescriptorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteMetricDescriptorRequest) ProtoMessage() {} - -func (x *DeleteMetricDescriptorRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteMetricDescriptorRequest.ProtoReflect.Descriptor instead. -func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{7} -} - -func (x *DeleteMetricDescriptorRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `ListTimeSeries` request. -type ListTimeSeriesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project on which to execute the request. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` - // Required. A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // that specifies which time series should be returned. The filter must - // specify a single metric type, and can additionally specify metric labels - // and other information. For example: - // - // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND - // metric.labels.instance_name = "my-instance-name" - Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // Required. The time interval for which results should be returned. Only time series - // that contain data points in the specified interval are included - // in the response. - Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` - // Specifies the alignment of data points in individual time series as - // well as how to combine the retrieved time series across specified labels. - // - // By default (if no `aggregation` is explicitly specified), the raw time - // series data is returned. - Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"` - // Unsupported: must be left blank. The points in each time series are - // currently returned in reverse time order (most recent to oldest). - OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` - // Required. Specifies which information is returned about the time series. - View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` - // A positive number that is the maximum number of results to return. If - // `page_size` is empty or more than 100,000 results, the effective - // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the - // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is - // the maximum number of `TimeSeries` returned. - PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListTimeSeriesRequest) Reset() { - *x = ListTimeSeriesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTimeSeriesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTimeSeriesRequest) ProtoMessage() {} - -func (x *ListTimeSeriesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTimeSeriesRequest.ProtoReflect.Descriptor instead. 
-func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8} -} - -func (x *ListTimeSeriesRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListTimeSeriesRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListTimeSeriesRequest) GetInterval() *TimeInterval { - if x != nil { - return x.Interval - } - return nil -} - -func (x *ListTimeSeriesRequest) GetAggregation() *Aggregation { - if x != nil { - return x.Aggregation - } - return nil -} - -func (x *ListTimeSeriesRequest) GetOrderBy() string { - if x != nil { - return x.OrderBy - } - return "" -} - -func (x *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { - if x != nil { - return x.View - } - return ListTimeSeriesRequest_FULL -} - -func (x *ListTimeSeriesRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListTimeSeriesRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `ListTimeSeries` response. -type ListTimeSeriesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // One or more time series that match the filter included in the request. - TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - // Query execution errors that may have caused the time series data returned - // to be incomplete. - ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` -} - -func (x *ListTimeSeriesResponse) Reset() { - *x = ListTimeSeriesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTimeSeriesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTimeSeriesResponse) ProtoMessage() {} - -func (x *ListTimeSeriesResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTimeSeriesResponse.ProtoReflect.Descriptor instead. -func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{9} -} - -func (x *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { - if x != nil { - return x.TimeSeries - } - return nil -} - -func (x *ListTimeSeriesResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -func (x *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { - if x != nil { - return x.ExecutionErrors - } - return nil -} - -// The `CreateTimeSeries` request. 
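For the ListTimeSeries request/response pair above, a hedged usage sketch; the MetricClient wrapper and the timestamppb helpers are assumptions, while the field names (Name, Filter, Interval, View) and the FULL/HEADERS view constants come from the generated types in this file:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ctx := context.Background()
	client, err := monitoring.NewMetricClient(ctx) // assumed client wrapper
	if err != nil {
		log.Fatalf("NewMetricClient: %v", err)
	}
	defer client.Close()

	req := &monitoringpb.ListTimeSeriesRequest{
		Name:   "projects/my-project",                                                 // hypothetical project
		Filter: `metric.type = "compute.googleapis.com/instance/cpu/usage_time"`,      // must name a single metric type
		Interval: &monitoringpb.TimeInterval{                                          // only points inside this window are returned
			StartTime: timestamppb.New(time.Now().Add(-10 * time.Minute)),
			EndTime:   timestamppb.Now(),
		},
		View: monitoringpb.ListTimeSeriesRequest_FULL, // FULL returns points, HEADERS only series metadata
	}
	it := client.ListTimeSeries(ctx, req)
	for {
		ts, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("ListTimeSeries: %v", err)
		}
		fmt.Printf("%s: %d points\n", ts.GetMetric().GetType(), len(ts.GetPoints()))
	}
}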
-type CreateTimeSeriesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Required. The new data to be added to a list of time series. - // Adds at most one data point to each of several time series. The new data - // point must be more recent than any other point in its time series. Each - // `TimeSeries` value must fully specify a unique time series by supplying - // all label values for the metric and the monitored resource. - // - // The maximum number of `TimeSeries` objects per `Create` request is 200. - TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` -} - -func (x *CreateTimeSeriesRequest) Reset() { - *x = CreateTimeSeriesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateTimeSeriesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateTimeSeriesRequest) ProtoMessage() {} - -func (x *CreateTimeSeriesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateTimeSeriesRequest.ProtoReflect.Descriptor instead. -func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{10} -} - -func (x *CreateTimeSeriesRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { - if x != nil { - return x.TimeSeries - } - return nil -} - -// DEPRECATED. Used to hold per-time-series error status. -type CreateTimeSeriesError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // DEPRECATED. Time series ID that resulted in the `status` error. - // - // Deprecated: Do not use. - TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` - // DEPRECATED. The status of the requested write operation for `time_series`. - // - // Deprecated: Do not use. 
- Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *CreateTimeSeriesError) Reset() { - *x = CreateTimeSeriesError{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateTimeSeriesError) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateTimeSeriesError) ProtoMessage() {} - -func (x *CreateTimeSeriesError) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateTimeSeriesError.ProtoReflect.Descriptor instead. -func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{11} -} - -// Deprecated: Do not use. -func (x *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { - if x != nil { - return x.TimeSeries - } - return nil -} - -// Deprecated: Do not use. -func (x *CreateTimeSeriesError) GetStatus() *status.Status { - if x != nil { - return x.Status - } - return nil -} - -// Summary of the result of a failed request to write data to a time series. -type CreateTimeSeriesSummary struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The number of points in the request. - TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"` - // The number of points that were successfully written. - SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"` - // The number of points that failed to be written. Order is not guaranteed. - Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` -} - -func (x *CreateTimeSeriesSummary) Reset() { - *x = CreateTimeSeriesSummary{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateTimeSeriesSummary) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateTimeSeriesSummary) ProtoMessage() {} - -func (x *CreateTimeSeriesSummary) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateTimeSeriesSummary.ProtoReflect.Descriptor instead. 
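A hedged sketch of a CreateTimeSeries write built from the request type above (one double point on a custom gauge metric against the global monitored resource); the MetricClient wrapper, the metricpb and monitoredrespb packages, and the placeholder project name are assumptions, while the 200-series limit and the "newer point" rule are taken from the generated comments:

package main

import (
	"context"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	metricpb "google.golang.org/genproto/googleapis/api/metric"
	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ctx := context.Background()
	client, err := monitoring.NewMetricClient(ctx) // assumed client wrapper
	if err != nil {
		log.Fatalf("NewMetricClient: %v", err)
	}
	defer client.Close()

	req := &monitoringpb.CreateTimeSeriesRequest{
		Name: "projects/my-project", // hypothetical project
		// At most 200 TimeSeries per request; each new point must be more recent
		// than any existing point in its series.
		TimeSeries: []*monitoringpb.TimeSeries{{
			Metric: &metricpb.Metric{Type: "custom.googleapis.com/my_test_metric"},
			Resource: &monitoredrespb.MonitoredResource{
				Type:   "global",
				Labels: map[string]string{"project_id": "my-project"},
			},
			Points: []*monitoringpb.Point{{
				Interval: &monitoringpb.TimeInterval{EndTime: timestamppb.Now()},
				Value:    &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{DoubleValue: 3.14}},
			}},
		}},
	}
	if err := client.CreateTimeSeries(ctx, req); err != nil {
		log.Fatalf("CreateTimeSeries: %v", err)
	}
}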
-func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12} -} - -func (x *CreateTimeSeriesSummary) GetTotalPointCount() int32 { - if x != nil { - return x.TotalPointCount - } - return 0 -} - -func (x *CreateTimeSeriesSummary) GetSuccessPointCount() int32 { - if x != nil { - return x.SuccessPointCount - } - return 0 -} - -func (x *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error { - if x != nil { - return x.Errors - } - return nil -} - -// The `QueryTimeSeries` request. -type QueryTimeSeriesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The query in the monitoring query language format. The default - // time zone is in UTC. - Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"` - // A positive number that is the maximum number of time_series_data to return. - PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *QueryTimeSeriesRequest) Reset() { - *x = QueryTimeSeriesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryTimeSeriesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryTimeSeriesRequest) ProtoMessage() {} - -func (x *QueryTimeSeriesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryTimeSeriesRequest.ProtoReflect.Descriptor instead. -func (*QueryTimeSeriesRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{13} -} - -func (x *QueryTimeSeriesRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *QueryTimeSeriesRequest) GetQuery() string { - if x != nil { - return x.Query - } - return "" -} - -func (x *QueryTimeSeriesRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *QueryTimeSeriesRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `QueryTimeSeries` response. -type QueryTimeSeriesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The descriptor for the time series data. - TimeSeriesDescriptor *TimeSeriesDescriptor `protobuf:"bytes,8,opt,name=time_series_descriptor,json=timeSeriesDescriptor,proto3" json:"time_series_descriptor,omitempty"` - // The time series data. 
- TimeSeriesData []*TimeSeriesData `protobuf:"bytes,9,rep,name=time_series_data,json=timeSeriesData,proto3" json:"time_series_data,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, use that value as - // `page_token` in the next call to this method. - NextPageToken string `protobuf:"bytes,10,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - // Query execution errors that may have caused the time series data returned - // to be incomplete. The available data will be available in the - // response. - PartialErrors []*status.Status `protobuf:"bytes,11,rep,name=partial_errors,json=partialErrors,proto3" json:"partial_errors,omitempty"` -} - -func (x *QueryTimeSeriesResponse) Reset() { - *x = QueryTimeSeriesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryTimeSeriesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryTimeSeriesResponse) ProtoMessage() {} - -func (x *QueryTimeSeriesResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryTimeSeriesResponse.ProtoReflect.Descriptor instead. -func (*QueryTimeSeriesResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{14} -} - -func (x *QueryTimeSeriesResponse) GetTimeSeriesDescriptor() *TimeSeriesDescriptor { - if x != nil { - return x.TimeSeriesDescriptor - } - return nil -} - -func (x *QueryTimeSeriesResponse) GetTimeSeriesData() []*TimeSeriesData { - if x != nil { - return x.TimeSeriesData - } - return nil -} - -func (x *QueryTimeSeriesResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -func (x *QueryTimeSeriesResponse) GetPartialErrors() []*status.Status { - if x != nil { - return x.PartialErrors - } - return nil -} - -// This is an error detail intended to be used with INVALID_ARGUMENT errors. -type QueryErrorList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Errors in parsing the time series query language text. The number of errors - // in the response may be limited. - Errors []*QueryError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` - // A summary of all the errors. 
- ErrorSummary string `protobuf:"bytes,2,opt,name=error_summary,json=errorSummary,proto3" json:"error_summary,omitempty"` -} - -func (x *QueryErrorList) Reset() { - *x = QueryErrorList{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryErrorList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryErrorList) ProtoMessage() {} - -func (x *QueryErrorList) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryErrorList.ProtoReflect.Descriptor instead. -func (*QueryErrorList) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{15} -} - -func (x *QueryErrorList) GetErrors() []*QueryError { - if x != nil { - return x.Errors - } - return nil -} - -func (x *QueryErrorList) GetErrorSummary() string { - if x != nil { - return x.ErrorSummary - } - return "" -} - -// Detailed information about an error category. -type CreateTimeSeriesSummary_Error struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status of the requested write operation. - Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - // The number of points that couldn't be written because of `status`. - PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` -} - -func (x *CreateTimeSeriesSummary_Error) Reset() { - *x = CreateTimeSeriesSummary_Error{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateTimeSeriesSummary_Error) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateTimeSeriesSummary_Error) ProtoMessage() {} - -func (x *CreateTimeSeriesSummary_Error) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateTimeSeriesSummary_Error.ProtoReflect.Descriptor instead. 
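A hedged paging sketch for the QueryTimeSeries request/response pair above; it uses only the generated request/response types and getters from this file, while the run callback stands in for whatever transport actually issues the RPC (for example a QueryService client) and is an assumption:

package monitoringexample

import (
	"context"
	"log"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

// queryAll drains every page of an MQL query and collects the returned
// TimeSeriesData, logging any partial errors that made the data incomplete.
func queryAll(
	ctx context.Context,
	run func(context.Context, *monitoringpb.QueryTimeSeriesRequest) (*monitoringpb.QueryTimeSeriesResponse, error),
	project, mql string,
) ([]*monitoringpb.TimeSeriesData, error) {
	req := &monitoringpb.QueryTimeSeriesRequest{
		Name:  "projects/" + project, // projects/[PROJECT_ID_OR_NUMBER]
		Query: mql,                   // Monitoring Query Language; default time zone is UTC
	}
	var out []*monitoringpb.TimeSeriesData
	for {
		resp, err := run(ctx, req)
		if err != nil {
			return nil, err
		}
		out = append(out, resp.GetTimeSeriesData()...)
		for _, pe := range resp.GetPartialErrors() {
			log.Printf("partial error (data may be incomplete): %s", pe.GetMessage())
		}
		if resp.GetNextPageToken() == "" {
			return out, nil
		}
		req.PageToken = resp.GetNextPageToken() // resume from the previous call
	}
}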
-func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12, 0} -} - -func (x *CreateTimeSeriesSummary_Error) GetStatus() *status.Status { - if x != nil { - return x.Status - } - return nil -} - -func (x *CreateTimeSeriesSummary_Error) GetPointCount() int32 { - if x != nil { - return x.PointCount - } - return 0 -} - -var File_google_monitoring_v3_metric_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_metric_service_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xd0, 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x37, 0x12, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x5a, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x7a, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x37, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 
0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x94, 0x01, - 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4b, 0x0a, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x1d, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x1d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xdc, 0x03, - 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, - 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x42, 0x79, 0x12, 0x53, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x10, 0x01, 0x22, 0xc2, 0x01, 0x0a, - 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 
0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, - 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, - 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x3d, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0x8e, - 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, - 0x2e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0x98, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x54, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x7e, 0x0a, 0x16, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xae, 0x02, 0x0a, 0x17, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x39, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 
0x6f, 0x0a, 0x0e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x0a, - 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x32, 0xbe, 0x0d, 0x0a, - 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xe4, - 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x73, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x41, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xcc, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x44, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb8, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, - 
0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x12, - 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0xa0, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, - 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, - 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x22, 0x5b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x73, 0x3a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0xa0, 0x01, - 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, - 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0xb1, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0xda, 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x2c, - 0x76, 0x69, 0x65, 0x77, 0x12, 0x99, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x10, - 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x1a, 0xda, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, - 0x41, 0xba, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, - 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 
0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xf9, 0x05, - 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0xea, 0x41, 0xf0, 0x01, 0x0a, 0x2a, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, - 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x39, - 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, - 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, - 0xb7, 0x02, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 
0x6d, 0x2f, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x59, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x4d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, - 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_monitoring_v3_metric_service_proto_rawDescOnce sync.Once - file_google_monitoring_v3_metric_service_proto_rawDescData = file_google_monitoring_v3_metric_service_proto_rawDesc -) - -func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_metric_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_metric_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_service_proto_rawDescData) - }) - return file_google_monitoring_v3_metric_service_proto_rawDescData -} - -var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_google_monitoring_v3_metric_service_proto_goTypes = []interface{}{ - (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView - (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest - (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse - (*GetMonitoredResourceDescriptorRequest)(nil), // 3: google.monitoring.v3.GetMonitoredResourceDescriptorRequest - (*ListMetricDescriptorsRequest)(nil), // 4: google.monitoring.v3.ListMetricDescriptorsRequest - (*ListMetricDescriptorsResponse)(nil), // 5: google.monitoring.v3.ListMetricDescriptorsResponse - (*GetMetricDescriptorRequest)(nil), // 6: google.monitoring.v3.GetMetricDescriptorRequest - (*CreateMetricDescriptorRequest)(nil), // 7: google.monitoring.v3.CreateMetricDescriptorRequest - (*DeleteMetricDescriptorRequest)(nil), // 8: 
google.monitoring.v3.DeleteMetricDescriptorRequest - (*ListTimeSeriesRequest)(nil), // 9: google.monitoring.v3.ListTimeSeriesRequest - (*ListTimeSeriesResponse)(nil), // 10: google.monitoring.v3.ListTimeSeriesResponse - (*CreateTimeSeriesRequest)(nil), // 11: google.monitoring.v3.CreateTimeSeriesRequest - (*CreateTimeSeriesError)(nil), // 12: google.monitoring.v3.CreateTimeSeriesError - (*CreateTimeSeriesSummary)(nil), // 13: google.monitoring.v3.CreateTimeSeriesSummary - (*QueryTimeSeriesRequest)(nil), // 14: google.monitoring.v3.QueryTimeSeriesRequest - (*QueryTimeSeriesResponse)(nil), // 15: google.monitoring.v3.QueryTimeSeriesResponse - (*QueryErrorList)(nil), // 16: google.monitoring.v3.QueryErrorList - (*CreateTimeSeriesSummary_Error)(nil), // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error - (*monitoredres.MonitoredResourceDescriptor)(nil), // 18: google.api.MonitoredResourceDescriptor - (*metric.MetricDescriptor)(nil), // 19: google.api.MetricDescriptor - (*TimeInterval)(nil), // 20: google.monitoring.v3.TimeInterval - (*Aggregation)(nil), // 21: google.monitoring.v3.Aggregation - (*TimeSeries)(nil), // 22: google.monitoring.v3.TimeSeries - (*status.Status)(nil), // 23: google.rpc.Status - (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor - (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData - (*QueryError)(nil), // 26: google.monitoring.v3.QueryError - (*emptypb.Empty)(nil), // 27: google.protobuf.Empty -} -var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{ - 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor - 19, // 1: google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors:type_name -> google.api.MetricDescriptor - 19, // 2: google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor:type_name -> google.api.MetricDescriptor - 20, // 3: google.monitoring.v3.ListTimeSeriesRequest.interval:type_name -> google.monitoring.v3.TimeInterval - 21, // 4: google.monitoring.v3.ListTimeSeriesRequest.aggregation:type_name -> google.monitoring.v3.Aggregation - 0, // 5: google.monitoring.v3.ListTimeSeriesRequest.view:type_name -> google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView - 22, // 6: google.monitoring.v3.ListTimeSeriesResponse.time_series:type_name -> google.monitoring.v3.TimeSeries - 23, // 7: google.monitoring.v3.ListTimeSeriesResponse.execution_errors:type_name -> google.rpc.Status - 22, // 8: google.monitoring.v3.CreateTimeSeriesRequest.time_series:type_name -> google.monitoring.v3.TimeSeries - 22, // 9: google.monitoring.v3.CreateTimeSeriesError.time_series:type_name -> google.monitoring.v3.TimeSeries - 23, // 10: google.monitoring.v3.CreateTimeSeriesError.status:type_name -> google.rpc.Status - 17, // 11: google.monitoring.v3.CreateTimeSeriesSummary.errors:type_name -> google.monitoring.v3.CreateTimeSeriesSummary.Error - 24, // 12: google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor:type_name -> google.monitoring.v3.TimeSeriesDescriptor - 25, // 13: google.monitoring.v3.QueryTimeSeriesResponse.time_series_data:type_name -> google.monitoring.v3.TimeSeriesData - 23, // 14: google.monitoring.v3.QueryTimeSeriesResponse.partial_errors:type_name -> google.rpc.Status - 26, // 15: google.monitoring.v3.QueryErrorList.errors:type_name -> google.monitoring.v3.QueryError - 23, // 16: google.monitoring.v3.CreateTimeSeriesSummary.Error.status:type_name -> google.rpc.Status - 1, // 17: 
google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:input_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsRequest - 3, // 18: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:input_type -> google.monitoring.v3.GetMonitoredResourceDescriptorRequest - 4, // 19: google.monitoring.v3.MetricService.ListMetricDescriptors:input_type -> google.monitoring.v3.ListMetricDescriptorsRequest - 6, // 20: google.monitoring.v3.MetricService.GetMetricDescriptor:input_type -> google.monitoring.v3.GetMetricDescriptorRequest - 7, // 21: google.monitoring.v3.MetricService.CreateMetricDescriptor:input_type -> google.monitoring.v3.CreateMetricDescriptorRequest - 8, // 22: google.monitoring.v3.MetricService.DeleteMetricDescriptor:input_type -> google.monitoring.v3.DeleteMetricDescriptorRequest - 9, // 23: google.monitoring.v3.MetricService.ListTimeSeries:input_type -> google.monitoring.v3.ListTimeSeriesRequest - 11, // 24: google.monitoring.v3.MetricService.CreateTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest - 2, // 25: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:output_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsResponse - 18, // 26: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:output_type -> google.api.MonitoredResourceDescriptor - 5, // 27: google.monitoring.v3.MetricService.ListMetricDescriptors:output_type -> google.monitoring.v3.ListMetricDescriptorsResponse - 19, // 28: google.monitoring.v3.MetricService.GetMetricDescriptor:output_type -> google.api.MetricDescriptor - 19, // 29: google.monitoring.v3.MetricService.CreateMetricDescriptor:output_type -> google.api.MetricDescriptor - 27, // 30: google.monitoring.v3.MetricService.DeleteMetricDescriptor:output_type -> google.protobuf.Empty - 10, // 31: google.monitoring.v3.MetricService.ListTimeSeries:output_type -> google.monitoring.v3.ListTimeSeriesResponse - 27, // 32: google.monitoring.v3.MetricService.CreateTimeSeries:output_type -> google.protobuf.Empty - 25, // [25:33] is the sub-list for method output_type - 17, // [17:25] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_metric_service_proto_init() } -func file_google_monitoring_v3_metric_service_proto_init() { - if File_google_monitoring_v3_metric_service_proto != nil { - return - } - file_google_monitoring_v3_alert_proto_init() - file_google_monitoring_v3_common_proto_init() - file_google_monitoring_v3_metric_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMonitoredResourceDescriptorsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMonitoredResourceDescriptorsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMonitoredResourceDescriptorRequest); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMetricDescriptorsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMetricDescriptorsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMetricDescriptorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateMetricDescriptorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteMetricDescriptorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTimeSeriesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTimeSeriesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTimeSeriesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTimeSeriesError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTimeSeriesSummary); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryTimeSeriesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryTimeSeriesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryErrorList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTimeSeriesSummary_Error); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_metric_service_proto_rawDesc, - NumEnums: 1, - NumMessages: 17, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_monitoring_v3_metric_service_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_metric_service_proto_depIdxs, - EnumInfos: file_google_monitoring_v3_metric_service_proto_enumTypes, - MessageInfos: file_google_monitoring_v3_metric_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_metric_service_proto = out.File - file_google_monitoring_v3_metric_service_proto_rawDesc = nil - file_google_monitoring_v3_metric_service_proto_goTypes = nil - file_google_monitoring_v3_metric_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// MetricServiceClient is the client API for MetricService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricServiceClient interface { - // Lists monitored resource descriptors that match a filter. This method does not require a Workspace. - ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) - // Gets a single monitored resource descriptor. This method does not require a Workspace. - GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) - // Lists metric descriptors that match a filter. This method does not require a Workspace. - ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) - // Gets a single metric descriptor. This method does not require a Workspace. - GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) - // Creates a new metric descriptor. - // User-created metric descriptors define - // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). - CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) - // Deletes a metric descriptor. Only user-created - // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be - // deleted. 
- DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Lists time series that match a filter. This method does not require a Workspace. - ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) - // Creates or adds data to one or more time series. - // The response is empty if all time series in the request were written. - // If any time series could not be written, a corresponding failure message is - // included in the error response. - CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type metricServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient { - return &metricServiceClient{cc} -} - -func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { - out := new(ListMonitoredResourceDescriptorsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { - out := new(monitoredres.MonitoredResourceDescriptor) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { - out := new(ListMetricDescriptorsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { - out := new(metric.MetricDescriptor) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { - out := new(metric.MetricDescriptor) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { - out := new(ListTimeSeriesResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetricServiceServer is the server API for MetricService service. -type MetricServiceServer interface { - // Lists monitored resource descriptors that match a filter. This method does not require a Workspace. - ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) - // Gets a single monitored resource descriptor. This method does not require a Workspace. - GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) - // Lists metric descriptors that match a filter. This method does not require a Workspace. - ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) - // Gets a single metric descriptor. This method does not require a Workspace. - GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) - // Creates a new metric descriptor. - // User-created metric descriptors define - // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). - CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) - // Deletes a metric descriptor. Only user-created - // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be - // deleted. - DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) - // Lists time series that match a filter. This method does not require a Workspace. - ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) - // Creates or adds data to one or more time series. - // The response is empty if all time series in the request were written. - // If any time series could not be written, a corresponding failure message is - // included in the error response. - CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) -} - -// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetricServiceServer struct { -} - -func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) { - return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented") -} -func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) { - return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented") -} -func (*UnimplementedMetricServiceServer) ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) { - return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented") -} -func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented") -} -func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented") -} -func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { - return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented") -} -func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) { - return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented") -} -func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { - return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented") -} - -func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { - s.RegisterService(&_MetricService_serviceDesc, srv) -} - -func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListMonitoredResourceDescriptorsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetMonitoredResourceDescriptorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListMetricDescriptorsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetMetricDescriptorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateMetricDescriptorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteMetricDescriptorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTimeSeriesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil 
{ - return srv.(MetricServiceServer).ListTimeSeries(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateTimeSeriesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.monitoring.v3.MetricService", - HandlerType: (*MetricServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListMonitoredResourceDescriptors", - Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, - }, - { - MethodName: "GetMonitoredResourceDescriptor", - Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, - }, - { - MethodName: "ListMetricDescriptors", - Handler: _MetricService_ListMetricDescriptors_Handler, - }, - { - MethodName: "GetMetricDescriptor", - Handler: _MetricService_GetMetricDescriptor_Handler, - }, - { - MethodName: "CreateMetricDescriptor", - Handler: _MetricService_CreateMetricDescriptor_Handler, - }, - { - MethodName: "DeleteMetricDescriptor", - Handler: _MetricService_DeleteMetricDescriptor_Handler, - }, - { - MethodName: "ListTimeSeries", - Handler: _MetricService_ListTimeSeries_Handler, - }, - { - MethodName: "CreateTimeSeries", - Handler: _MetricService_CreateTimeSeries_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/monitoring/v3/metric_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go deleted file mode 100644 index e933e82408..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/mutation_record.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Describes a change made to a configuration. -type MutationRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // When the change occurred. - MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` - // The email address of the user making the change. - MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` -} - -func (x *MutationRecord) Reset() { - *x = MutationRecord{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MutationRecord) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MutationRecord) ProtoMessage() {} - -func (x *MutationRecord) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead. 
-func (*MutationRecord) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0} -} - -func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp { - if x != nil { - return x.MutateTime - } - return nil -} - -func (x *MutationRecord) GetMutatedBy() string { - if x != nil { - return x.MutatedBy - } - return "" -} - -var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x42, 0xcb, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, - 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, - 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once - file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc -) - -func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_mutation_record_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData) - }) - return file_google_monitoring_v3_mutation_record_proto_rawDescData -} - -var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_mutation_record_proto_goTypes = []interface{}{ - (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord - (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp -} -var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{ - 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_mutation_record_proto_init() } -func file_google_monitoring_v3_mutation_record_proto_init() { - if File_google_monitoring_v3_mutation_record_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MutationRecord); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes, - }.Build() - File_google_monitoring_v3_mutation_record_proto = out.File - file_google_monitoring_v3_mutation_record_proto_rawDesc = nil - file_google_monitoring_v3_mutation_record_proto_goTypes = nil - file_google_monitoring_v3_mutation_record_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go deleted file mode 100644 index 6105f945ac..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go +++ /dev/null @@ -1,612 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/notification.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - api "google.golang.org/genproto/googleapis/api" - _ "google.golang.org/genproto/googleapis/api/annotations" - label "google.golang.org/genproto/googleapis/api/label" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Indicates whether the channel has been verified or not. It is illegal -// to specify this field in a -// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] -// or an -// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] -// operation. -type NotificationChannel_VerificationStatus int32 - -const ( - // Sentinel value used to indicate that the state is unknown, omitted, or - // is not applicable (as in the case of channels that neither support - // nor require verification in order to function). - NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 - // The channel has yet to be verified and requires verification to function. - // Note that this state also applies to the case where the verification - // process has been initiated by sending a verification code but where - // the verification code has not been submitted to complete the process. - NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 - // It has been proven that notifications can be received on this - // notification channel and that someone on the project has access - // to messages that are delivered to that channel. - NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 -) - -// Enum value maps for NotificationChannel_VerificationStatus. 
-var ( - NotificationChannel_VerificationStatus_name = map[int32]string{ - 0: "VERIFICATION_STATUS_UNSPECIFIED", - 1: "UNVERIFIED", - 2: "VERIFIED", - } - NotificationChannel_VerificationStatus_value = map[string]int32{ - "VERIFICATION_STATUS_UNSPECIFIED": 0, - "UNVERIFIED": 1, - "VERIFIED": 2, - } -) - -func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus { - p := new(NotificationChannel_VerificationStatus) - *p = x - return p -} - -func (x NotificationChannel_VerificationStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor() -} - -func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_notification_proto_enumTypes[0] -} - -func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead. -func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0} -} - -// A description of a notification channel. The descriptor includes -// the properties of the channel and the set of labels or fields that -// must be specified to configure channels of a given type. -type NotificationChannelDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The full REST resource name for this descriptor. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] - // - // In the above, `[TYPE]` is the value of the `type` field. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - // The type of notification channel, such as "email", "sms", etc. - // Notification channel types are globally unique. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // A human-readable name for the notification channel type. This - // form of the name is suitable for a user interface. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // A human-readable description of the notification channel - // type. The description may include a description of the properties - // of the channel and pointers to external documentation. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The set of labels that must be defined to identify a particular - // channel of the corresponding type. Each label includes a - // description for how that field should be populated. - Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` - // The tiers that support this notification channel; the project service tier - // must be one of the supported_tiers. - // - // Deprecated: Do not use. - SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` - // The product launch stage for channels of this type. 
- LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` -} - -func (x *NotificationChannelDescriptor) Reset() { - *x = NotificationChannelDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NotificationChannelDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NotificationChannelDescriptor) ProtoMessage() {} - -func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead. -func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0} -} - -func (x *NotificationChannelDescriptor) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NotificationChannelDescriptor) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *NotificationChannelDescriptor) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *NotificationChannelDescriptor) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor { - if x != nil { - return x.Labels - } - return nil -} - -// Deprecated: Do not use. -func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { - if x != nil { - return x.SupportedTiers - } - return nil -} - -func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage { - if x != nil { - return x.LaunchStage - } - return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED -} - -// A `NotificationChannel` is a medium through which an alert is -// delivered when a policy violation is detected. Examples of channels -// include email, SMS, and third-party messaging applications. Fields -// containing sensitive information like authentication tokens or -// contact info are only partially populated on retrieval. -type NotificationChannel struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of the notification channel. This field matches the - // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The full REST resource name for this channel. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - // - // The `[CHANNEL_ID]` is automatically assigned by the server on creation. - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - // An optional human-readable name for this notification channel. It is - // recommended that you specify a non-empty and unique name in order to - // make it easier to identify the channels in your project, though this is - // not enforced. The display name is limited to 512 Unicode characters. 
- DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // An optional human-readable description of this notification channel. This - // description may provide additional details, beyond the display - // name, for the channel. This may not exceed 1024 Unicode characters. - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - // Configuration fields that define the channel and its behavior. The - // permissible and required labels are specified in the - // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the - // `NotificationChannelDescriptor` corresponding to the `type` field. - Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // User-supplied key/value data that does not need to conform to - // the corresponding `NotificationChannelDescriptor`'s schema, unlike - // the `labels` field. This field is intended to be used for organizing - // and identifying the `NotificationChannel` objects. - // - // The field can contain up to 64 entries. Each key and value is limited to - // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and - // values can contain only lowercase letters, numerals, underscores, and - // dashes. Keys must begin with a letter. - UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Indicates whether this channel has been verified or not. On a - // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] - // or - // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] - // operation, this field is expected to be populated. - // - // If the value is `UNVERIFIED`, then it indicates that the channel is - // non-functioning (it both requires verification and lacks verification); - // otherwise, it is assumed that the channel works. - // - // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that - // the channel is of a type that does not require verification or that - // this specific channel has been exempted from verification because it was - // created prior to verification being required for channels of this type. - // - // This field cannot be modified using a standard - // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] - // operation. To change the value of this field, you must call - // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. - VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` - // Whether notifications are forwarded to the described channel. This makes - // it possible to disable delivery of notifications to a particular channel - // without removing the channel from all alerting policies that reference - // the channel. 
This is a more convenient approach when the change is - // temporary and you want to receive notifications from the same set - // of alerting policies on the channel at some point in the future. - Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *NotificationChannel) Reset() { - *x = NotificationChannel{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NotificationChannel) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NotificationChannel) ProtoMessage() {} - -func (x *NotificationChannel) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead. -func (*NotificationChannel) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1} -} - -func (x *NotificationChannel) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *NotificationChannel) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NotificationChannel) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *NotificationChannel) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *NotificationChannel) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *NotificationChannel) GetUserLabels() map[string]string { - if x != nil { - return x.UserLabels - } - return nil -} - -func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus { - if x != nil { - return x.VerificationStatus - } - return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED -} - -func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue { - if x != nil { - return x.Enabled - } - return nil -} - -var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_notification_proto_rawDesc = []byte{ - 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, - 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0e, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x54, 0x69, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x54, 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, - 0x74, 0x61, 0x67, 0x65, 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, - 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 
0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, - 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, - 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, - 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x22, 0xa6, 0x07, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x06, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x0b, 0x75, - 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73, 0x65, 0x72, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, - 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x02, 0x3a, - 0xfe, 0x01, 0xea, 0x41, 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, - 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, - 0x3c, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, - 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, - 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x01, 0x2a, - 0x42, 0xc9, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, - 0x02, 0x1a, 
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once - file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc -) - -func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData) - }) - return file_google_monitoring_v3_notification_proto_rawDescData -} - -var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_monitoring_v3_notification_proto_goTypes = []interface{}{ - (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus - (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor - (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel - nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry - nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry - (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor - (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier - (api.LaunchStage)(0), // 7: google.api.LaunchStage - (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue -} -var file_google_monitoring_v3_notification_proto_depIdxs = []int32{ - 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor - 6, // 1: google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier - 7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage - 3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry - 4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry - 0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus - 8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_notification_proto_init() } -func file_google_monitoring_v3_notification_proto_init() { - if File_google_monitoring_v3_notification_proto != nil { - return - } - file_google_monitoring_v3_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NotificationChannelDescriptor); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NotificationChannel); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_notification_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs, - EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes, - MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes, - }.Build() - File_google_monitoring_v3_notification_proto = out.File - file_google_monitoring_v3_notification_proto_rawDesc = nil - file_google_monitoring_v3_notification_proto_goTypes = nil - file_google_monitoring_v3_notification_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go deleted file mode 100644 index b8fc414d07..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go +++ /dev/null @@ -1,1957 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/notification_service.proto - -package monitoring - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" - _ "google.golang.org/protobuf/types/known/structpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The `ListNotificationChannelDescriptors` request. 
-type ListNotificationChannelDescriptorsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The REST resource name of the parent from which to retrieve - // the notification channel descriptors. The expected syntax is: - // - // projects/[PROJECT_ID_OR_NUMBER] - // - // Note that this names the parent container in which to look for the - // descriptors; to retrieve a single descriptor by name, use the - // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] - // operation, instead. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // The maximum number of results to return in a single response. If - // not set to a positive number, a reasonable value will be chosen by the - // service. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If non-empty, `page_token` must contain a value returned as the - // `next_page_token` in a previous response to request the next set - // of results. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListNotificationChannelDescriptorsRequest) Reset() { - *x = ListNotificationChannelDescriptorsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationChannelDescriptorsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} - -func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ListNotificationChannelDescriptorsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `ListNotificationChannelDescriptors` response. -type ListNotificationChannelDescriptorsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The monitored resource descriptors supported for the specified - // project, optionally filtered. - ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"` - // If not empty, indicates that there may be more results that match - // the request. Use the value in the `page_token` field in a - // subsequent request to fetch the next set of results. If empty, - // all results have been returned. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListNotificationChannelDescriptorsResponse) Reset() { - *x = ListNotificationChannelDescriptorsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationChannelDescriptorsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} - -func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { - if x != nil { - return x.ChannelDescriptors - } - return nil -} - -func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `GetNotificationChannelDescriptor` response. -type GetNotificationChannelDescriptorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The channel type for which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNotificationChannelDescriptorRequest) Reset() { - *x = GetNotificationChannelDescriptorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationChannelDescriptorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} - -func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2} -} - -func (x *GetNotificationChannelDescriptorRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `CreateNotificationChannel` request. -type CreateNotificationChannelRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. 
The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - // - // This names the container into which the channel will be - // written, this does not name the newly created channel. The resulting - // channel's name will have a normalized version of this field as a prefix, - // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Required. The definition of the `NotificationChannel` to create. - NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` -} - -func (x *CreateNotificationChannelRequest) Reset() { - *x = CreateNotificationChannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNotificationChannelRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNotificationChannelRequest) ProtoMessage() {} - -func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3} -} - -func (x *CreateNotificationChannelRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { - if x != nil { - return x.NotificationChannel - } - return nil -} - -// The `ListNotificationChannels` request. -type ListNotificationChannelsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project on which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - // - // This names the container - // in which to look for the notification channels; it does not name a - // specific channel. To query a specific channel by REST resource name, use - // the - // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] - // operation. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // If provided, this field specifies the criteria that must be met by - // notification channels to be included in the response. - // - // For more details, see [sorting and - // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). - Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` - // A comma-separated list of fields by which to sort the result. Supports - // the same set of fields as in `filter`. Entries can be prefixed with - // a minus sign to sort in descending rather than ascending order. - // - // For more details, see [sorting and - // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). 
- OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` - // The maximum number of results to return in a single response. If - // not set to a positive number, a reasonable value will be chosen by the - // service. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If non-empty, `page_token` must contain a value returned as the - // `next_page_token` in a previous response to request the next set - // of results. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListNotificationChannelsRequest) Reset() { - *x = ListNotificationChannelsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationChannelsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationChannelsRequest) ProtoMessage() {} - -func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4} -} - -func (x *ListNotificationChannelsRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListNotificationChannelsRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListNotificationChannelsRequest) GetOrderBy() string { - if x != nil { - return x.OrderBy - } - return "" -} - -func (x *ListNotificationChannelsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListNotificationChannelsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `ListNotificationChannels` response. -type ListNotificationChannelsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The notification channels defined for the specified project. - NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` - // If not empty, indicates that there may be more results that match - // the request. Use the value in the `page_token` field in a - // subsequent request to fetch the next set of results. If empty, - // all results have been returned. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListNotificationChannelsResponse) Reset() { - *x = ListNotificationChannelsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationChannelsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationChannelsResponse) ProtoMessage() {} - -func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5} -} - -func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { - if x != nil { - return x.NotificationChannels - } - return nil -} - -func (x *ListNotificationChannelsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `GetNotificationChannel` request. -type GetNotificationChannelRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The channel for which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNotificationChannelRequest) Reset() { - *x = GetNotificationChannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationChannelRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationChannelRequest) ProtoMessage() {} - -func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6} -} - -func (x *GetNotificationChannelRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `UpdateNotificationChannel` request. -type UpdateNotificationChannelRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fields to update. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - // Required. 
A description of the changes to be applied to the specified - // notification channel. The description must provide a definition for - // fields to be updated; the names of these fields should also be - // included in the `update_mask`. - NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` -} - -func (x *UpdateNotificationChannelRequest) Reset() { - *x = UpdateNotificationChannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateNotificationChannelRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateNotificationChannelRequest) ProtoMessage() {} - -func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead. -func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7} -} - -func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { - if x != nil { - return x.NotificationChannel - } - return nil -} - -// The `DeleteNotificationChannel` request. -type DeleteNotificationChannelRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The channel for which to execute the request. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // If true, the notification channel will be deleted regardless of its - // use in alert policies (the policies will be updated to remove the - // channel). If false, channels that are still referenced by an existing - // alerting policy will fail to be deleted in a delete operation. - Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` -} - -func (x *DeleteNotificationChannelRequest) Reset() { - *x = DeleteNotificationChannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNotificationChannelRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNotificationChannelRequest) ProtoMessage() {} - -func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8} -} - -func (x *DeleteNotificationChannelRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *DeleteNotificationChannelRequest) GetForce() bool { - if x != nil { - return x.Force - } - return false -} - -// The `SendNotificationChannelVerificationCode` request. -type SendNotificationChannelVerificationCodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The notification channel to which to send a verification code. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *SendNotificationChannelVerificationCodeRequest) Reset() { - *x = SendNotificationChannelVerificationCodeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SendNotificationChannelVerificationCodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} - -func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. -func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9} -} - -func (x *SendNotificationChannelVerificationCodeRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `GetNotificationChannelVerificationCode` request. -type GetNotificationChannelVerificationCodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The notification channel for which a verification code is to be generated - // and retrieved. This must name a channel that is already verified; if - // the specified channel is not verified, the request will fail. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The desired expiration time. If specified, the API will guarantee that - // the returned code will not be valid after the specified timestamp; - // however, the API cannot guarantee that the returned code will be - // valid for at least as long as the requested time (the API puts an upper - // bound on the amount of time for which a code may be valid). If omitted, - // a default expiration will be used, which may be less than the max - // permissible expiration (so specifying an expiration may extend the - // code's lifetime over omitting an expiration, even though the API does - // impose an upper limit on the maximum expiration that is permitted). 
- ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` -} - -func (x *GetNotificationChannelVerificationCodeRequest) Reset() { - *x = GetNotificationChannelVerificationCodeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationChannelVerificationCodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {} - -func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10} -} - -func (x *GetNotificationChannelVerificationCodeRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp { - if x != nil { - return x.ExpireTime - } - return nil -} - -// The `GetNotificationChannelVerificationCode` request. -type GetNotificationChannelVerificationCodeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The verification code, which may be used to verify other channels - // that have an equivalent identity (i.e. other channels of the same - // type with the same fingerprint such as other email channels with - // the same email address or other sms channels with the same number). - Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` - // The expiration time associated with the code that was returned. If - // an expiration was provided in the request, this is the minimum of the - // requested expiration in the request and the max permitted expiration. - ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` -} - -func (x *GetNotificationChannelVerificationCodeResponse) Reset() { - *x = GetNotificationChannelVerificationCodeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationChannelVerificationCodeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} - -func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead. 
-func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11} -} - -func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string { - if x != nil { - return x.Code - } - return "" -} - -func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp { - if x != nil { - return x.ExpireTime - } - return nil -} - -// The `VerifyNotificationChannel` request. -type VerifyNotificationChannelRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The notification channel to verify. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The verification code that was delivered to the channel as - // a result of invoking the `SendNotificationChannelVerificationCode` API - // method or that was retrieved from a verified channel via - // `GetNotificationChannelVerificationCode`. For example, one might have - // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only - // guaranteed that the code is valid UTF-8; one should not - // make any assumptions regarding the structure or format of the code). - Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` -} - -func (x *VerifyNotificationChannelRequest) Reset() { - *x = VerifyNotificationChannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VerifyNotificationChannelRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VerifyNotificationChannelRequest) ProtoMessage() {} - -func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead. 
-func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12} -} - -func (x *VerifyNotificationChannelRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *VerifyNotificationChannelRequest) GetCode() string { - if x != nil { - return x.Code - } - return "" -} - -var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, - 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 
0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x7e, 0x0a, - 0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x0a, 0x37, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xd0, 0x01, - 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x61, 0x0a, - 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, - 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 
0x6e, 0x6e, 0x65, 0x6c, - 0x22, 0xdb, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, - 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x14, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6a, 0x0a, 0x1d, 0x47, - 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, - 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x20, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, - 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 
0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, 0x0a, - 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, - 0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x74, 
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, - 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, 0x01, - 0x0a, 0x20, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, - 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xdd, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc4, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x73, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb5, 0x01, 0x0a, 0x16, - 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, - 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0xe4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 
0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x2a, 0x2f, 0x76, - 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0xda, 0x41, - 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x83, 0x02, 0x0a, 0x19, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x59, 0x32, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0xda, 0x41, 0x20, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x12, 0xae, 0x01, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, - 
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x12, 0xdc, 0x01, 0x0a, 0x27, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x46, 0x22, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x73, 0x65, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x87, 0x02, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x22, 0x40, - 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, - 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, 0x56, - 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x68, 0x61, 
0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, - 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, - 0x65, 0x61, 0x64, 0x42, 0xd0, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, - 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, - 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - 
file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once - file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc -) - -func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData) - }) - return file_google_monitoring_v3_notification_service_proto_rawDescData -} - -var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_google_monitoring_v3_notification_service_proto_goTypes = []interface{}{ - (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest - (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse - (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest - (*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest - (*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest - (*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse - (*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest - (*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest - (*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest - (*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest - (*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest - (*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse - (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest - (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor - (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel - (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp - (*emptypb.Empty)(nil), // 17: google.protobuf.Empty -} -var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{ - 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor - 14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel - 14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel - 15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask - 14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel - 16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> 
google.protobuf.Timestamp - 16, // 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp - 0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest - 2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest - 4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest - 6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest - 3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest - 7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest - 8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest - 9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest - 10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest - 12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest - 1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse - 13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor - 5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse - 14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel - 14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel - 14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel - 17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty - 17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty - 11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse - 14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel - 17, // [17:27] is the sub-list for method output_type - 7, // [7:17] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func 
init() { file_google_monitoring_v3_notification_service_proto_init() } -func file_google_monitoring_v3_notification_service_proto_init() { - if File_google_monitoring_v3_notification_service_proto != nil { - return - } - file_google_monitoring_v3_notification_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationChannelDescriptorsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationChannelDescriptorsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNotificationChannelDescriptorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNotificationChannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationChannelsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationChannelsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNotificationChannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateNotificationChannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNotificationChannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendNotificationChannelVerificationCodeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*GetNotificationChannelVerificationCodeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNotificationChannelVerificationCodeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyNotificationChannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 13, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_notification_service_proto = out.File - file_google_monitoring_v3_notification_service_proto_rawDesc = nil - file_google_monitoring_v3_notification_service_proto_goTypes = nil - file_google_monitoring_v3_notification_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// NotificationChannelServiceClient is the client API for NotificationChannelService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NotificationChannelServiceClient interface { - // Lists the descriptors for supported channel types. The use of descriptors - // makes it possible for new channel types to be dynamically added. - ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) - // Gets a single channel descriptor. The descriptor indicates which fields - // are expected / permitted for a notification channel of the given type. - GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) - // Lists the notification channels that have been created for the project. - ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) - // Gets a single notification channel. The channel includes the relevant - // configuration details with which the channel was created. However, the - // response may truncate or omit passwords, API keys, or other private key - // matter and thus the response may not be 100% identical to the information - // that was supplied in the call to the create method. 
- GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) - // Creates a new notification channel, representing a single notification - // endpoint such as an email address, SMS number, or PagerDuty service. - CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) - // Updates a notification channel. Fields not specified in the field mask - // remain unchanged. - UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) - // Deletes a notification channel. - DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Causes a verification code to be delivered to the channel. The code - // can then be supplied in `VerifyNotificationChannel` to verify the channel. - SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Requests a verification code for an already verified channel that can then - // be used in a call to VerifyNotificationChannel() on a different channel - // with an equivalent identity in the same or in a different project. This - // makes it possible to copy a channel between projects without requiring - // manual reverification of the channel. If the channel is not in the - // verified state, this method will fail (in other words, this may only be - // used if the SendNotificationChannelVerificationCode and - // VerifyNotificationChannel paths have already been used to put the given - // channel into the verified state). - // - // There is no guarantee that the verification codes returned by this method - // will be of a similar structure or form as the ones that are delivered - // to the channel via SendNotificationChannelVerificationCode; while - // VerifyNotificationChannel() will recognize both the codes delivered via - // SendNotificationChannelVerificationCode() and returned from - // GetNotificationChannelVerificationCode(), it is typically the case that - // the verification codes delivered via - // SendNotificationChannelVerificationCode() will be shorter and also - // have a shorter expiration (e.g. codes such as "G-123456") whereas - // GetVerificationCode() will typically return a much longer, websafe base - // 64 encoded string that has a longer expiration time. - GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) - // Verifies a `NotificationChannel` by proving receipt of the code - // delivered to the channel as a result of calling - // `SendNotificationChannelVerificationCode`. 
- VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) -} - -type notificationChannelServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient { - return &notificationChannelServiceClient{cc} -} - -func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { - out := new(ListNotificationChannelDescriptorsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { - out := new(NotificationChannelDescriptor) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { - out := new(ListNotificationChannelsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { - out := new(NotificationChannel) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { - out := new(NotificationChannel) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { - out := new(NotificationChannel) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { - out := new(GetNotificationChannelVerificationCodeResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { - out := new(NotificationChannel) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NotificationChannelServiceServer is the server API for NotificationChannelService service. -type NotificationChannelServiceServer interface { - // Lists the descriptors for supported channel types. The use of descriptors - // makes it possible for new channel types to be dynamically added. - ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) - // Gets a single channel descriptor. The descriptor indicates which fields - // are expected / permitted for a notification channel of the given type. - GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) - // Lists the notification channels that have been created for the project. - ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) - // Gets a single notification channel. The channel includes the relevant - // configuration details with which the channel was created. However, the - // response may truncate or omit passwords, API keys, or other private key - // matter and thus the response may not be 100% identical to the information - // that was supplied in the call to the create method. - GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) - // Creates a new notification channel, representing a single notification - // endpoint such as an email address, SMS number, or PagerDuty service. - CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) - // Updates a notification channel. Fields not specified in the field mask - // remain unchanged. - UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) - // Deletes a notification channel. - DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) - // Causes a verification code to be delivered to the channel. 
The code - // can then be supplied in `VerifyNotificationChannel` to verify the channel. - SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) - // Requests a verification code for an already verified channel that can then - // be used in a call to VerifyNotificationChannel() on a different channel - // with an equivalent identity in the same or in a different project. This - // makes it possible to copy a channel between projects without requiring - // manual reverification of the channel. If the channel is not in the - // verified state, this method will fail (in other words, this may only be - // used if the SendNotificationChannelVerificationCode and - // VerifyNotificationChannel paths have already been used to put the given - // channel into the verified state). - // - // There is no guarantee that the verification codes returned by this method - // will be of a similar structure or form as the ones that are delivered - // to the channel via SendNotificationChannelVerificationCode; while - // VerifyNotificationChannel() will recognize both the codes delivered via - // SendNotificationChannelVerificationCode() and returned from - // GetNotificationChannelVerificationCode(), it is typically the case that - // the verification codes delivered via - // SendNotificationChannelVerificationCode() will be shorter and also - // have a shorter expiration (e.g. codes such as "G-123456") whereas - // GetVerificationCode() will typically return a much longer, websafe base - // 64 encoded string that has a longer expiration time. - GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) - // Verifies a `NotificationChannel` by proving receipt of the code - // delivered to the channel as a result of calling - // `SendNotificationChannelVerificationCode`. - VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) -} - -// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedNotificationChannelServiceServer struct { -} - -func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented") -} -func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented") -} - -func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { - s.RegisterService(&_NotificationChannelService_serviceDesc, srv) -} - -func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationChannelDescriptorsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) - } - 
info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationChannelDescriptorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationChannelsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateNotificationChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNotificationChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendNotificationChannelVerificationCodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationChannelVerificationCodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) - } - return interceptor(ctx, in, 
info, handler) -} - -func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VerifyNotificationChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.monitoring.v3.NotificationChannelService", - HandlerType: (*NotificationChannelServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListNotificationChannelDescriptors", - Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, - }, - { - MethodName: "GetNotificationChannelDescriptor", - Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, - }, - { - MethodName: "ListNotificationChannels", - Handler: _NotificationChannelService_ListNotificationChannels_Handler, - }, - { - MethodName: "GetNotificationChannel", - Handler: _NotificationChannelService_GetNotificationChannel_Handler, - }, - { - MethodName: "CreateNotificationChannel", - Handler: _NotificationChannelService_CreateNotificationChannel_Handler, - }, - { - MethodName: "UpdateNotificationChannel", - Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, - }, - { - MethodName: "DeleteNotificationChannel", - Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, - }, - { - MethodName: "SendNotificationChannelVerificationCode", - Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, - }, - { - MethodName: "GetNotificationChannelVerificationCode", - Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, - }, - { - MethodName: "VerifyNotificationChannel", - Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/monitoring/v3/notification_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go deleted file mode 100644 index d67808ff10..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go +++ /dev/null @@ -1,2310 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/service.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - _ "google.golang.org/genproto/googleapis/api/monitoredres" - calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - _ "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// `ServiceLevelObjective.View` determines what form of -// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`, -// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs. -type ServiceLevelObjective_View int32 - -const ( - // Same as FULL. - ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0 - // Return the embedded `ServiceLevelIndicator` in the form in which it was - // defined. If it was defined using a `BasicSli`, return that `BasicSli`. - ServiceLevelObjective_FULL ServiceLevelObjective_View = 2 - // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead - // return the `ServiceLevelIndicator` with its mode of computation fully - // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using - // `RequestBasedSli` or `WindowsBasedSli`, return the - // `ServiceLevelIndicator` as it was provided. - ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1 -) - -// Enum value maps for ServiceLevelObjective_View. -var ( - ServiceLevelObjective_View_name = map[int32]string{ - 0: "VIEW_UNSPECIFIED", - 2: "FULL", - 1: "EXPLICIT", - } - ServiceLevelObjective_View_value = map[string]int32{ - "VIEW_UNSPECIFIED": 0, - "FULL": 2, - "EXPLICIT": 1, - } -) - -func (x ServiceLevelObjective_View) Enum() *ServiceLevelObjective_View { - p := new(ServiceLevelObjective_View) - *p = x - return p -} - -func (x ServiceLevelObjective_View) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ServiceLevelObjective_View) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_service_proto_enumTypes[0].Descriptor() -} - -func (ServiceLevelObjective_View) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_service_proto_enumTypes[0] -} - -func (x ServiceLevelObjective_View) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ServiceLevelObjective_View.Descriptor instead. -func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1, 0} -} - -// A `Service` is a discrete, autonomous, and network-accessible unit, designed -// to solve an individual concern -// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). 
In -// Cloud Monitoring, a `Service` acts as the root resource under which -// operational aspects of the service are accessible. -type Service struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Resource name for this Service. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Name used for UI elements listing this Service. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // REQUIRED. Service-identifying atoms specifying the underlying service. - // - // Types that are assignable to Identifier: - // *Service_Custom_ - // *Service_AppEngine_ - // *Service_CloudEndpoints_ - // *Service_ClusterIstio_ - // *Service_MeshIstio_ - Identifier isService_Identifier `protobuf_oneof:"identifier"` - // Configuration for how to query telemetry on a Service. - Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` -} - -func (x *Service) Reset() { - *x = Service{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service) ProtoMessage() {} - -func (x *Service) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service.ProtoReflect.Descriptor instead. -func (*Service) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0} -} - -func (x *Service) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Service) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (m *Service) GetIdentifier() isService_Identifier { - if m != nil { - return m.Identifier - } - return nil -} - -func (x *Service) GetCustom() *Service_Custom { - if x, ok := x.GetIdentifier().(*Service_Custom_); ok { - return x.Custom - } - return nil -} - -func (x *Service) GetAppEngine() *Service_AppEngine { - if x, ok := x.GetIdentifier().(*Service_AppEngine_); ok { - return x.AppEngine - } - return nil -} - -func (x *Service) GetCloudEndpoints() *Service_CloudEndpoints { - if x, ok := x.GetIdentifier().(*Service_CloudEndpoints_); ok { - return x.CloudEndpoints - } - return nil -} - -// Deprecated: Do not use. -func (x *Service) GetClusterIstio() *Service_ClusterIstio { - if x, ok := x.GetIdentifier().(*Service_ClusterIstio_); ok { - return x.ClusterIstio - } - return nil -} - -func (x *Service) GetMeshIstio() *Service_MeshIstio { - if x, ok := x.GetIdentifier().(*Service_MeshIstio_); ok { - return x.MeshIstio - } - return nil -} - -func (x *Service) GetTelemetry() *Service_Telemetry { - if x != nil { - return x.Telemetry - } - return nil -} - -type isService_Identifier interface { - isService_Identifier() -} - -type Service_Custom_ struct { - // Custom service type. - Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"` -} - -type Service_AppEngine_ struct { - // Type used for App Engine services. 
- AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"` -} - -type Service_CloudEndpoints_ struct { - // Type used for Cloud Endpoints services. - CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"` -} - -type Service_ClusterIstio_ struct { - // Type used for Istio services that live in a Kubernetes cluster. - // - // Deprecated: Do not use. - ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"` -} - -type Service_MeshIstio_ struct { - // Type used for Istio services scoped to an Istio mesh. - MeshIstio *Service_MeshIstio `protobuf:"bytes,10,opt,name=mesh_istio,json=meshIstio,proto3,oneof"` -} - -func (*Service_Custom_) isService_Identifier() {} - -func (*Service_AppEngine_) isService_Identifier() {} - -func (*Service_CloudEndpoints_) isService_Identifier() {} - -func (*Service_ClusterIstio_) isService_Identifier() {} - -func (*Service_MeshIstio_) isService_Identifier() {} - -// A Service-Level Objective (SLO) describes a level of desired good service. It -// consists of a service-level indicator (SLI), a performance goal, and a period -// over which the objective is to be evaluated against that goal. The SLO can -// use SLIs defined in a number of different manners. Typical SLOs might include -// "99% of requests in each rolling week have latency below 200 milliseconds" or -// "99.5% of requests in each calendar month return successfully." -type ServiceLevelObjective struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Resource name for this `ServiceLevelObjective`. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Name used for UI elements listing this SLO. - DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // The definition of good service, used to measure and calculate the quality - // of the `Service`'s performance with respect to a single aspect of service - // quality. - ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"` - // The fraction of service that must be good in order for this objective to be - // met. `0 < goal <= 0.999`. - Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"` - // The time period over which the objective will be evaluated. 
- // - // Types that are assignable to Period: - // *ServiceLevelObjective_RollingPeriod - // *ServiceLevelObjective_CalendarPeriod - Period isServiceLevelObjective_Period `protobuf_oneof:"period"` -} - -func (x *ServiceLevelObjective) Reset() { - *x = ServiceLevelObjective{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceLevelObjective) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceLevelObjective) ProtoMessage() {} - -func (x *ServiceLevelObjective) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceLevelObjective.ProtoReflect.Descriptor instead. -func (*ServiceLevelObjective) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ServiceLevelObjective) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ServiceLevelObjective) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator { - if x != nil { - return x.ServiceLevelIndicator - } - return nil -} - -func (x *ServiceLevelObjective) GetGoal() float64 { - if x != nil { - return x.Goal - } - return 0 -} - -func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period { - if m != nil { - return m.Period - } - return nil -} - -func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration { - if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok { - return x.RollingPeriod - } - return nil -} - -func (x *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod { - if x, ok := x.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok { - return x.CalendarPeriod - } - return calendarperiod.CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED -} - -type isServiceLevelObjective_Period interface { - isServiceLevelObjective_Period() -} - -type ServiceLevelObjective_RollingPeriod struct { - // A rolling time period, semantically "in the past ``". - // Must be an integer multiple of 1 day no larger than 30 days. - RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` -} - -type ServiceLevelObjective_CalendarPeriod struct { - // A calendar period, semantically "since the start of the current - // ``". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and - // `MONTH` are supported. - CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"` -} - -func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {} - -func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {} - -// A Service-Level Indicator (SLI) describes the "performance" of a service. For -// some services, the SLI is well-defined. In such cases, the SLI can be -// described easily by referencing the well-known SLI and providing the needed -// parameters. Alternatively, a "custom" SLI can be defined with a query to the -// underlying metric store. 
An SLI is defined to be `good_service / -// total_service` over any queried time interval. The value of performance -// always falls into the range `0 <= performance <= 1`. A custom SLI describes -// how to compute this ratio, whether this is by dividing values from a pair of -// time series, cutting a `Distribution` into good and bad counts, or counting -// time windows in which the service complies with a criterion. For separation -// of concerns, a single Service-Level Indicator measures performance for only -// one aspect of service quality, such as fraction of successful queries or -// fast-enough queries. -type ServiceLevelIndicator struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Service level indicators can be grouped by whether the "unit" of service - // being measured is based on counts of good requests or on counts of good - // time windows - // - // Types that are assignable to Type: - // *ServiceLevelIndicator_BasicSli - // *ServiceLevelIndicator_RequestBased - // *ServiceLevelIndicator_WindowsBased - Type isServiceLevelIndicator_Type `protobuf_oneof:"type"` -} - -func (x *ServiceLevelIndicator) Reset() { - *x = ServiceLevelIndicator{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceLevelIndicator) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceLevelIndicator) ProtoMessage() {} - -func (x *ServiceLevelIndicator) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceLevelIndicator.ProtoReflect.Descriptor instead. -func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{2} -} - -func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type { - if m != nil { - return m.Type - } - return nil -} - -func (x *ServiceLevelIndicator) GetBasicSli() *BasicSli { - if x, ok := x.GetType().(*ServiceLevelIndicator_BasicSli); ok { - return x.BasicSli - } - return nil -} - -func (x *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli { - if x, ok := x.GetType().(*ServiceLevelIndicator_RequestBased); ok { - return x.RequestBased - } - return nil -} - -func (x *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli { - if x, ok := x.GetType().(*ServiceLevelIndicator_WindowsBased); ok { - return x.WindowsBased - } - return nil -} - -type isServiceLevelIndicator_Type interface { - isServiceLevelIndicator_Type() -} - -type ServiceLevelIndicator_BasicSli struct { - // Basic SLI on a well-known service type. 
- BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"` -} - -type ServiceLevelIndicator_RequestBased struct { - // Request-based SLIs - RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"` -} - -type ServiceLevelIndicator_WindowsBased struct { - // Windows-based SLIs - WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"` -} - -func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {} - -func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {} - -func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {} - -// An SLI measuring performance on a well-known service type. Performance will -// be computed on the basis of pre-defined metrics. The type of the -// `service_resource` determines the metrics to use and the -// `service_resource.labels` and `metric_labels` are used to construct a -// monitoring filter to filter that metric down to just the data relevant to -// this service. -type BasicSli struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from - // other methods will not be used to calculate performance for this SLI. If - // omitted, this SLI applies to all the Service's methods. For service types - // that don't support breaking down by method, setting this field will result - // in an error. - Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"` - // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry - // from other locations will not be used to calculate performance for this - // SLI. If omitted, this SLI applies to all locations in which the Service has - // activity. For service types that don't support breaking down by location, - // setting this field will result in an error. - Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"` - // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry - // from other API versions will not be used to calculate performance for this - // SLI. If omitted, this SLI applies to all API versions. For service types - // that don't support breaking down by version, setting this field will result - // in an error. - Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"` - // This SLI can be evaluated on the basis of availability or latency. - // - // Types that are assignable to SliCriteria: - // *BasicSli_Availability - // *BasicSli_Latency - SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"` -} - -func (x *BasicSli) Reset() { - *x = BasicSli{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BasicSli) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicSli) ProtoMessage() {} - -func (x *BasicSli) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BasicSli.ProtoReflect.Descriptor instead. 
-func (*BasicSli) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3} -} - -func (x *BasicSli) GetMethod() []string { - if x != nil { - return x.Method - } - return nil -} - -func (x *BasicSli) GetLocation() []string { - if x != nil { - return x.Location - } - return nil -} - -func (x *BasicSli) GetVersion() []string { - if x != nil { - return x.Version - } - return nil -} - -func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria { - if m != nil { - return m.SliCriteria - } - return nil -} - -func (x *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria { - if x, ok := x.GetSliCriteria().(*BasicSli_Availability); ok { - return x.Availability - } - return nil -} - -func (x *BasicSli) GetLatency() *BasicSli_LatencyCriteria { - if x, ok := x.GetSliCriteria().(*BasicSli_Latency); ok { - return x.Latency - } - return nil -} - -type isBasicSli_SliCriteria interface { - isBasicSli_SliCriteria() -} - -type BasicSli_Availability struct { - // Good service is defined to be the count of requests made to this service - // that return successfully. - Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"` -} - -type BasicSli_Latency struct { - // Good service is defined to be the count of requests made to this service - // that are fast enough with respect to `latency.threshold`. - Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"` -} - -func (*BasicSli_Availability) isBasicSli_SliCriteria() {} - -func (*BasicSli_Latency) isBasicSli_SliCriteria() {} - -// Range of numerical values, inclusive of `min` and exclusive of `max`. If the -// open range "< range.max" is desired, set `range.min = -infinity`. If the open -// range ">= range.min" is desired, set `range.max = infinity`. -type Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Range minimum. - Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` - // Range maximum. - Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` -} - -func (x *Range) Reset() { - *x = Range{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Range) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Range) ProtoMessage() {} - -func (x *Range) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Range.ProtoReflect.Descriptor instead. -func (*Range) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{4} -} - -func (x *Range) GetMin() float64 { - if x != nil { - return x.Min - } - return 0 -} - -func (x *Range) GetMax() float64 { - if x != nil { - return x.Max - } - return 0 -} - -// Service Level Indicators for which atomic units of service are counted -// directly. -type RequestBasedSli struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The means to compute a ratio of `good_service` to `total_service`. 
- // - // Types that are assignable to Method: - // *RequestBasedSli_GoodTotalRatio - // *RequestBasedSli_DistributionCut - Method isRequestBasedSli_Method `protobuf_oneof:"method"` -} - -func (x *RequestBasedSli) Reset() { - *x = RequestBasedSli{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RequestBasedSli) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestBasedSli) ProtoMessage() {} - -func (x *RequestBasedSli) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestBasedSli.ProtoReflect.Descriptor instead. -func (*RequestBasedSli) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{5} -} - -func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method { - if m != nil { - return m.Method - } - return nil -} - -func (x *RequestBasedSli) GetGoodTotalRatio() *TimeSeriesRatio { - if x, ok := x.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok { - return x.GoodTotalRatio - } - return nil -} - -func (x *RequestBasedSli) GetDistributionCut() *DistributionCut { - if x, ok := x.GetMethod().(*RequestBasedSli_DistributionCut); ok { - return x.DistributionCut - } - return nil -} - -type isRequestBasedSli_Method interface { - isRequestBasedSli_Method() -} - -type RequestBasedSli_GoodTotalRatio struct { - // `good_total_ratio` is used when the ratio of `good_service` to - // `total_service` is computed from two `TimeSeries`. - GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"` -} - -type RequestBasedSli_DistributionCut struct { - // `distribution_cut` is used when `good_service` is a count of values - // aggregated in a `Distribution` that fall into a good range. The - // `total_service` is the total count of all values aggregated in the - // `Distribution`. - DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"` -} - -func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {} - -func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {} - -// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the -// `good_service / total_service` ratio. The specified `TimeSeries` must have -// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = -// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify -// exactly two of good, bad, and total, and the relationship `good_service + -// bad_service = total_service` will be assumed. -type TimeSeriesRatio struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifying a `TimeSeries` quantifying good service provided. Must have - // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = - // DELTA` or `MetricKind = CUMULATIVE`. 
- GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"` - // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifying a `TimeSeries` quantifying bad service, either demanded service - // that was not provided or demanded service that was of inadequate quality. - // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have - // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. - BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"` - // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifying a `TimeSeries` quantifying total demanded service. Must have - // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = - // DELTA` or `MetricKind = CUMULATIVE`. - TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"` -} - -func (x *TimeSeriesRatio) Reset() { - *x = TimeSeriesRatio{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesRatio) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesRatio) ProtoMessage() {} - -func (x *TimeSeriesRatio) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesRatio.ProtoReflect.Descriptor instead. -func (*TimeSeriesRatio) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{6} -} - -func (x *TimeSeriesRatio) GetGoodServiceFilter() string { - if x != nil { - return x.GoodServiceFilter - } - return "" -} - -func (x *TimeSeriesRatio) GetBadServiceFilter() string { - if x != nil { - return x.BadServiceFilter - } - return "" -} - -func (x *TimeSeriesRatio) GetTotalServiceFilter() string { - if x != nil { - return x.TotalServiceFilter - } - return "" -} - -// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring -// good service and total service. The `TimeSeries` must have `ValueType = -// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The -// computed `good_service` will be the count of values x in the `Distribution` -// such that `range.min <= x < range.max`. -type DistributionCut struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifying a `TimeSeries` aggregating values. Must have `ValueType = - // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. - DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"` - // Range of values considered "good." For a one-sided range, set one bound to - // an infinite value. 
- Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"` -} - -func (x *DistributionCut) Reset() { - *x = DistributionCut{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DistributionCut) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DistributionCut) ProtoMessage() {} - -func (x *DistributionCut) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DistributionCut.ProtoReflect.Descriptor instead. -func (*DistributionCut) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{7} -} - -func (x *DistributionCut) GetDistributionFilter() string { - if x != nil { - return x.DistributionFilter - } - return "" -} - -func (x *DistributionCut) GetRange() *Range { - if x != nil { - return x.Range - } - return nil -} - -// A `WindowsBasedSli` defines `good_service` as the count of time windows for -// which the provided service was of good quality. Criteria for determining -// if service was good are embedded in the `window_criterion`. -type WindowsBasedSli struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The criterion to use for evaluating window goodness. - // - // Types that are assignable to WindowCriterion: - // *WindowsBasedSli_GoodBadMetricFilter - // *WindowsBasedSli_GoodTotalRatioThreshold - // *WindowsBasedSli_MetricMeanInRange - // *WindowsBasedSli_MetricSumInRange - WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"` - // Duration over which window quality is evaluated. Must be an integer - // fraction of a day and at least `60s`. - WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` -} - -func (x *WindowsBasedSli) Reset() { - *x = WindowsBasedSli{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WindowsBasedSli) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WindowsBasedSli) ProtoMessage() {} - -func (x *WindowsBasedSli) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WindowsBasedSli.ProtoReflect.Descriptor instead. 
-func (*WindowsBasedSli) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8} -} - -func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion { - if m != nil { - return m.WindowCriterion - } - return nil -} - -func (x *WindowsBasedSli) GetGoodBadMetricFilter() string { - if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok { - return x.GoodBadMetricFilter - } - return "" -} - -func (x *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold { - if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok { - return x.GoodTotalRatioThreshold - } - return nil -} - -func (x *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange { - if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok { - return x.MetricMeanInRange - } - return nil -} - -func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange { - if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok { - return x.MetricSumInRange - } - return nil -} - -func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration { - if x != nil { - return x.WindowPeriod - } - return nil -} - -type isWindowsBasedSli_WindowCriterion interface { - isWindowsBasedSli_WindowCriterion() -} - -type WindowsBasedSli_GoodBadMetricFilter struct { - // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if - // any `true` values appear in the window. - GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"` -} - -type WindowsBasedSli_GoodTotalRatioThreshold struct { - // A window is good if its `performance` is high enough. - GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"` -} - -type WindowsBasedSli_MetricMeanInRange struct { - // A window is good if the metric's value is in a good range, averaged - // across returned streams. - MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"` -} - -type WindowsBasedSli_MetricSumInRange struct { - // A window is good if the metric's value is in a good range, summed across - // returned streams. - MetricSumInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"` -} - -func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {} - -func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {} - -func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} - -func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} - -// Custom view of service telemetry. Currently a place-holder pending final -// design. 
-type Service_Custom struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Service_Custom) Reset() { - *x = Service_Custom{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service_Custom) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service_Custom) ProtoMessage() {} - -func (x *Service_Custom) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service_Custom.ProtoReflect.Descriptor instead. -func (*Service_Custom) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 0} -} - -// App Engine service. Learn more at https://cloud.google.com/appengine. -type Service_AppEngine struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ID of the App Engine module underlying this service. Corresponds to - // the `module_id` resource label in the `gae_app` monitored resource: - // https://cloud.google.com/monitoring/api/resources#tag_gae_app - ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` -} - -func (x *Service_AppEngine) Reset() { - *x = Service_AppEngine{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service_AppEngine) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service_AppEngine) ProtoMessage() {} - -func (x *Service_AppEngine) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service_AppEngine.ProtoReflect.Descriptor instead. -func (*Service_AppEngine) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *Service_AppEngine) GetModuleId() string { - if x != nil { - return x.ModuleId - } - return "" -} - -// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints. -type Service_CloudEndpoints struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the Cloud Endpoints service underlying this service. 
- // Corresponds to the `service` resource label in the `api` monitored - // resource: https://cloud.google.com/monitoring/api/resources#tag_api - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` -} - -func (x *Service_CloudEndpoints) Reset() { - *x = Service_CloudEndpoints{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service_CloudEndpoints) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service_CloudEndpoints) ProtoMessage() {} - -func (x *Service_CloudEndpoints) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service_CloudEndpoints.ProtoReflect.Descriptor instead. -func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *Service_CloudEndpoints) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -// Istio service scoped to a single Kubernetes cluster. Learn more at -// http://istio.io. -// -// Deprecated: Do not use. -type Service_ClusterIstio struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The location of the Kubernetes cluster in which this Istio service is - // defined. Corresponds to the `location` resource label in `k8s_cluster` - // resources. - Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` - // The name of the Kubernetes cluster in which this Istio service is - // defined. Corresponds to the `cluster_name` resource label in - // `k8s_cluster` resources. - ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - // The namespace of the Istio service underlying this service. Corresponds - // to the `destination_service_namespace` metric label in Istio metrics. - ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` - // The name of the Istio service underlying this service. Corresponds to the - // `destination_service_name` metric label in Istio metrics. - ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` -} - -func (x *Service_ClusterIstio) Reset() { - *x = Service_ClusterIstio{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service_ClusterIstio) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service_ClusterIstio) ProtoMessage() {} - -func (x *Service_ClusterIstio) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service_ClusterIstio.ProtoReflect.Descriptor instead. 
-func (*Service_ClusterIstio) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 3} -} - -func (x *Service_ClusterIstio) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *Service_ClusterIstio) GetClusterName() string { - if x != nil { - return x.ClusterName - } - return "" -} - -func (x *Service_ClusterIstio) GetServiceNamespace() string { - if x != nil { - return x.ServiceNamespace - } - return "" -} - -func (x *Service_ClusterIstio) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -// Istio service scoped to an Istio mesh -type Service_MeshIstio struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Identifier for the mesh in which this Istio service is defined. - // Corresponds to the `mesh_uid` metric label in Istio metrics. - MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` - // The namespace of the Istio service underlying this service. Corresponds - // to the `destination_service_namespace` metric label in Istio metrics. - ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` - // The name of the Istio service underlying this service. Corresponds to the - // `destination_service_name` metric label in Istio metrics. - ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` -} - -func (x *Service_MeshIstio) Reset() { - *x = Service_MeshIstio{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service_MeshIstio) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service_MeshIstio) ProtoMessage() {} - -func (x *Service_MeshIstio) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service_MeshIstio.ProtoReflect.Descriptor instead. -func (*Service_MeshIstio) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 4} -} - -func (x *Service_MeshIstio) GetMeshUid() string { - if x != nil { - return x.MeshUid - } - return "" -} - -func (x *Service_MeshIstio) GetServiceNamespace() string { - if x != nil { - return x.ServiceNamespace - } - return "" -} - -func (x *Service_MeshIstio) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -// Configuration for how to query telemetry on a Service. -type Service_Telemetry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The full name of the resource that defines this service. Formatted as - // described in https://cloud.google.com/apis/design/resource_names. 
- ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` -} - -func (x *Service_Telemetry) Reset() { - *x = Service_Telemetry{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Service_Telemetry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Service_Telemetry) ProtoMessage() {} - -func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead. -func (*Service_Telemetry) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 5} -} - -func (x *Service_Telemetry) GetResourceName() string { - if x != nil { - return x.ResourceName - } - return "" -} - -// Future parameters for the availability SLI. -type BasicSli_AvailabilityCriteria struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *BasicSli_AvailabilityCriteria) Reset() { - *x = BasicSli_AvailabilityCriteria{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BasicSli_AvailabilityCriteria) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} - -func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BasicSli_AvailabilityCriteria.ProtoReflect.Descriptor instead. -func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 0} -} - -// Parameters for a latency threshold SLI. -type BasicSli_LatencyCriteria struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Good service is defined to be the count of requests made to this service - // that return in no more than `threshold`. 
- Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` -} - -func (x *BasicSli_LatencyCriteria) Reset() { - *x = BasicSli_LatencyCriteria{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BasicSli_LatencyCriteria) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicSli_LatencyCriteria) ProtoMessage() {} - -func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BasicSli_LatencyCriteria.ProtoReflect.Descriptor instead. -func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1} -} - -func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration { - if x != nil { - return x.Threshold - } - return nil -} - -// A `PerformanceThreshold` is used when each window is good when that window -// has a sufficiently high `performance`. -type WindowsBasedSli_PerformanceThreshold struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The means, either a request-based SLI or a basic SLI, by which to compute - // performance over a window. - // - // Types that are assignable to Type: - // *WindowsBasedSli_PerformanceThreshold_Performance - // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance - Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"` - // If window `performance >= threshold`, the window is counted as good. - Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"` -} - -func (x *WindowsBasedSli_PerformanceThreshold) Reset() { - *x = WindowsBasedSli_PerformanceThreshold{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WindowsBasedSli_PerformanceThreshold) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} - -func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WindowsBasedSli_PerformanceThreshold.ProtoReflect.Descriptor instead. 
-func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 0} -} - -func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type { - if m != nil { - return m.Type - } - return nil -} - -func (x *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli { - if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok { - return x.Performance - } - return nil -} - -func (x *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli { - if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok { - return x.BasicSliPerformance - } - return nil -} - -func (x *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 { - if x != nil { - return x.Threshold - } - return 0 -} - -type isWindowsBasedSli_PerformanceThreshold_Type interface { - isWindowsBasedSli_PerformanceThreshold_Type() -} - -type WindowsBasedSli_PerformanceThreshold_Performance struct { - // `RequestBasedSli` to evaluate to judge window quality. - Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"` -} - -type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct { - // `BasicSli` to evaluate to judge window quality. - BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"` -} - -func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() { -} - -func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() { -} - -// A `MetricRange` is used when each window is good when the value x of a -// single `TimeSeries` satisfies `range.min <= x < range.max`. The provided -// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and -// `MetricKind = GAUGE`. -type WindowsBasedSli_MetricRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - // specifying the `TimeSeries` to use for evaluating window quality. - TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` - // Range of values considered "good." For a one-sided range, set one bound - // to an infinite value. - Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` -} - -func (x *WindowsBasedSli_MetricRange) Reset() { - *x = WindowsBasedSli_MetricRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WindowsBasedSli_MetricRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WindowsBasedSli_MetricRange) ProtoMessage() {} - -func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WindowsBasedSli_MetricRange.ProtoReflect.Descriptor instead. 
-func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 1} -} - -func (x *WindowsBasedSli_MetricRange) GetTimeSeries() string { - if x != nil { - return x.TimeSeries - } - return "" -} - -func (x *WindowsBasedSli_MetricRange) GetRange() *Range { - if x != nil { - return x.Range - } - return nil -} - -var File_google_monitoring_v3_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_service_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, - 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf1, - 0x08, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, - 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 
0x69, 0x6e, - 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x55, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0c, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, - 0x65, 0x73, 0x68, 0x5f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, - 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, - 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x1a, 0x08, 0x0a, 0x06, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x49, 0x64, - 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xa1, 0x01, 0x0a, - 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x1a, 0x0a, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x02, 0x18, 0x01, - 0x1a, 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, - 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 
0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, - 0x01, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, - 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, - 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x22, 0xe0, 0x05, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, - 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x61, 0x6c, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, 0x42, 0x0a, 0x0e, - 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, - 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, - 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, - 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, - 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, - 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, - 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, - 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, - 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, - 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, - 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, - 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, - 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, - 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 
0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, - 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, - 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, - 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, - 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, - 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, - 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, - 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, - 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, - 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, - 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, - 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 
0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, - 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, - 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, - 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, - 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, - 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, - 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, - 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, - 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, - 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, - 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, - 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, - 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, - 0x6f, 0x77, 
0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, - 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, - 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, - 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, - 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, - 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, - 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, - 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, - 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, - 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, - 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xce, 0x01, 0x0a, - 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, - 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_service_proto_rawDescOnce sync.Once - file_google_monitoring_v3_service_proto_rawDescData = file_google_monitoring_v3_service_proto_rawDesc -) - -func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_proto_rawDescData) - }) - return file_google_monitoring_v3_service_proto_rawDescData -} - -var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var file_google_monitoring_v3_service_proto_goTypes = []interface{}{ - (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View - (*Service)(nil), // 1: google.monitoring.v3.Service - (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective - (*ServiceLevelIndicator)(nil), // 3: google.monitoring.v3.ServiceLevelIndicator - (*BasicSli)(nil), // 4: google.monitoring.v3.BasicSli - (*Range)(nil), // 5: google.monitoring.v3.Range - (*RequestBasedSli)(nil), // 6: google.monitoring.v3.RequestBasedSli - (*TimeSeriesRatio)(nil), // 7: google.monitoring.v3.TimeSeriesRatio - (*DistributionCut)(nil), // 8: google.monitoring.v3.DistributionCut - (*WindowsBasedSli)(nil), // 9: google.monitoring.v3.WindowsBasedSli - (*Service_Custom)(nil), // 10: google.monitoring.v3.Service.Custom - (*Service_AppEngine)(nil), // 11: google.monitoring.v3.Service.AppEngine - (*Service_CloudEndpoints)(nil), // 12: google.monitoring.v3.Service.CloudEndpoints - 
(*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio - (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio - (*Service_Telemetry)(nil), // 15: google.monitoring.v3.Service.Telemetry - (*BasicSli_AvailabilityCriteria)(nil), // 16: google.monitoring.v3.BasicSli.AvailabilityCriteria - (*BasicSli_LatencyCriteria)(nil), // 17: google.monitoring.v3.BasicSli.LatencyCriteria - (*WindowsBasedSli_PerformanceThreshold)(nil), // 18: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold - (*WindowsBasedSli_MetricRange)(nil), // 19: google.monitoring.v3.WindowsBasedSli.MetricRange - (*durationpb.Duration)(nil), // 20: google.protobuf.Duration - (calendarperiod.CalendarPeriod)(0), // 21: google.type.CalendarPeriod -} -var file_google_monitoring_v3_service_proto_depIdxs = []int32{ - 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom - 11, // 1: google.monitoring.v3.Service.app_engine:type_name -> google.monitoring.v3.Service.AppEngine - 12, // 2: google.monitoring.v3.Service.cloud_endpoints:type_name -> google.monitoring.v3.Service.CloudEndpoints - 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> google.monitoring.v3.Service.ClusterIstio - 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio - 15, // 5: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry - 3, // 6: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator - 20, // 7: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration - 21, // 8: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod - 4, // 9: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli - 6, // 10: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli - 9, // 11: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli - 16, // 12: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria - 17, // 13: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria - 7, // 14: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio - 8, // 15: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut - 5, // 16: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range - 18, // 17: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold - 19, // 18: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange - 19, // 19: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange - 20, // 20: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration - 20, // 21: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration - 6, // 22: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli - 4, // 23: 
google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli - 5, // 24: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range - 25, // [25:25] is the sub-list for method output_type - 25, // [25:25] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_service_proto_init() } -func file_google_monitoring_v3_service_proto_init() { - if File_google_monitoring_v3_service_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceLevelObjective); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceLevelIndicator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicSli); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Range); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBasedSli); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeSeriesRatio); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DistributionCut); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WindowsBasedSli); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service_Custom); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*Service_AppEngine); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service_CloudEndpoints); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service_ClusterIstio); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service_MeshIstio); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service_Telemetry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicSli_AvailabilityCriteria); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicSli_LatencyCriteria); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WindowsBasedSli_PerformanceThreshold); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WindowsBasedSli_MetricRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Service_Custom_)(nil), - (*Service_AppEngine_)(nil), - (*Service_CloudEndpoints_)(nil), - (*Service_ClusterIstio_)(nil), - (*Service_MeshIstio_)(nil), - } - file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*ServiceLevelObjective_RollingPeriod)(nil), - (*ServiceLevelObjective_CalendarPeriod)(nil), - } - file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*ServiceLevelIndicator_BasicSli)(nil), - (*ServiceLevelIndicator_RequestBased)(nil), - (*ServiceLevelIndicator_WindowsBased)(nil), - } - file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []interface{}{ - (*BasicSli_Availability)(nil), - (*BasicSli_Latency)(nil), - } - file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*RequestBasedSli_GoodTotalRatio)(nil), - (*RequestBasedSli_DistributionCut)(nil), - } - file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []interface{}{ - 
(*WindowsBasedSli_GoodBadMetricFilter)(nil), - (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), - (*WindowsBasedSli_MetricMeanInRange)(nil), - (*WindowsBasedSli_MetricSumInRange)(nil), - } - file_google_monitoring_v3_service_proto_msgTypes[17].OneofWrappers = []interface{}{ - (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), - (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc, - NumEnums: 1, - NumMessages: 19, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_service_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_service_proto_depIdxs, - EnumInfos: file_google_monitoring_v3_service_proto_enumTypes, - MessageInfos: file_google_monitoring_v3_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_service_proto = out.File - file_google_monitoring_v3_service_proto_rawDesc = nil - file_google_monitoring_v3_service_proto_goTypes = nil - file_google_monitoring_v3_service_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go deleted file mode 100644 index e05d0b40c0..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/service_service.proto - -package monitoring - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The `CreateService` request. -type CreateServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the parent workspace. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Optional. The Service id to use for this Service. If omitted, an id will be - // generated instead. Must match the pattern `[a-z0-9\-]+` - ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - // Required. The `Service` to create. - Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` -} - -func (x *CreateServiceRequest) Reset() { - *x = CreateServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateServiceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateServiceRequest) ProtoMessage() {} - -func (x *CreateServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateServiceRequest.ProtoReflect.Descriptor instead. -func (*CreateServiceRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{0} -} - -func (x *CreateServiceRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateServiceRequest) GetServiceId() string { - if x != nil { - return x.ServiceId - } - return "" -} - -func (x *CreateServiceRequest) GetService() *Service { - if x != nil { - return x.Service - } - return nil -} - -// The `GetService` request. -type GetServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the `Service`. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetServiceRequest) Reset() { - *x = GetServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServiceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServiceRequest) ProtoMessage() {} - -func (x *GetServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServiceRequest.ProtoReflect.Descriptor instead. -func (*GetServiceRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{1} -} - -func (x *GetServiceRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `ListServices` request. -type ListServicesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. 
Resource name of the parent containing the listed services, either a - // project or a Monitoring Workspace. The formats are: - // - // projects/[PROJECT_ID_OR_NUMBER] - // workspaces/[HOST_PROJECT_ID_OR_NUMBER] - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // A filter specifying what `Service`s to return. The filter currently - // supports the following fields: - // - // - `identifier_case` - // - `app_engine.module_id` - // - `cloud_endpoints.service` - // - `cluster_istio.location` - // - `cluster_istio.cluster_name` - // - `cluster_istio.service_namespace` - // - `cluster_istio.service_name` - // - // `identifier_case` refers to which option in the identifier oneof is - // populated. For example, the filter `identifier_case = "CUSTOM"` would match - // all services with a value for the `custom` field. Valid options are - // "CUSTOM", "APP_ENGINE", "CLOUD_ENDPOINTS", and "CLUSTER_ISTIO". - Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // A non-negative number that is the maximum number of results to return. - // When 0, use default page size. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListServicesRequest) Reset() { - *x = ListServicesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServicesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServicesRequest) ProtoMessage() {} - -func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. -func (*ListServicesRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{2} -} - -func (x *ListServicesRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListServicesRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListServicesRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListServicesRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The `ListServices` response. -type ListServicesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The `Service`s matching the specified filter. - Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListServicesResponse) Reset() { - *x = ListServicesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServicesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServicesResponse) ProtoMessage() {} - -func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. -func (*ListServicesResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{3} -} - -func (x *ListServicesResponse) GetServices() []*Service { - if x != nil { - return x.Services - } - return nil -} - -func (x *ListServicesResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `UpdateService` request. -type UpdateServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The `Service` to draw updates from. - // The given `name` specifies the resource to update. - Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - // A set of field paths defining which fields to use for the update. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateServiceRequest) Reset() { - *x = UpdateServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateServiceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateServiceRequest) ProtoMessage() {} - -func (x *UpdateServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateServiceRequest.ProtoReflect.Descriptor instead. -func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{4} -} - -func (x *UpdateServiceRequest) GetService() *Service { - if x != nil { - return x.Service - } - return nil -} - -func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// The `DeleteService` request. -type DeleteServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the `Service` to delete. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteServiceRequest) Reset() { - *x = DeleteServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteServiceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteServiceRequest) ProtoMessage() {} - -func (x *DeleteServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteServiceRequest.ProtoReflect.Descriptor instead. -func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{5} -} - -func (x *DeleteServiceRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The `CreateServiceLevelObjective` request. -type CreateServiceLevelObjectiveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the parent `Service`. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Optional. The ServiceLevelObjective id to use for this - // ServiceLevelObjective. If omitted, an id will be generated instead. Must - // match the pattern `[a-z0-9\-]+` - ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"` - // Required. The `ServiceLevelObjective` to create. - // The provided `name` will be respected if no `ServiceLevelObjective` exists - // with this name. - ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` -} - -func (x *CreateServiceLevelObjectiveRequest) Reset() { - *x = CreateServiceLevelObjectiveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateServiceLevelObjectiveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {} - -func (x *CreateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
-func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{6} -} - -func (x *CreateServiceLevelObjectiveRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string { - if x != nil { - return x.ServiceLevelObjectiveId - } - return "" -} - -func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { - if x != nil { - return x.ServiceLevelObjective - } - return nil -} - -// The `GetServiceLevelObjective` request. -type GetServiceLevelObjectiveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the `ServiceLevelObjective` to get. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the - // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the - // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the - // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. - View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` -} - -func (x *GetServiceLevelObjectiveRequest) Reset() { - *x = GetServiceLevelObjectiveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServiceLevelObjectiveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServiceLevelObjectiveRequest) ProtoMessage() {} - -func (x *GetServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. -func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{7} -} - -func (x *GetServiceLevelObjectiveRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View { - if x != nil { - return x.View - } - return ServiceLevelObjective_VIEW_UNSPECIFIED -} - -// The `ListServiceLevelObjectives` request. -type ListServiceLevelObjectivesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the parent containing the listed SLOs, either a - // project or a Monitoring Workspace. The formats are: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // A filter specifying what `ServiceLevelObjective`s to return. 
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` - // A non-negative number that is the maximum number of results to return. - // When 0, use default page size. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return additional results from the previous method call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each - // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the - // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the - // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. - View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` -} - -func (x *ListServiceLevelObjectivesRequest) Reset() { - *x = ListServiceLevelObjectivesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServiceLevelObjectivesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServiceLevelObjectivesRequest) ProtoMessage() {} - -func (x *ListServiceLevelObjectivesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServiceLevelObjectivesRequest.ProtoReflect.Descriptor instead. -func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{8} -} - -func (x *ListServiceLevelObjectivesRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListServiceLevelObjectivesRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListServiceLevelObjectivesRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListServiceLevelObjectivesRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View { - if x != nil { - return x.View - } - return ServiceLevelObjective_VIEW_UNSPECIFIED -} - -// The `ListServiceLevelObjectives` response. -type ListServiceLevelObjectivesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The `ServiceLevelObjective`s matching the specified filter. - ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"` - // If there are more results than have been returned, then this field is set - // to a non-empty value. To see the additional results, - // use that value as `page_token` in the next call to this method. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListServiceLevelObjectivesResponse) Reset() { - *x = ListServiceLevelObjectivesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServiceLevelObjectivesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServiceLevelObjectivesResponse) ProtoMessage() {} - -func (x *ListServiceLevelObjectivesResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServiceLevelObjectivesResponse.ProtoReflect.Descriptor instead. -func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{9} -} - -func (x *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective { - if x != nil { - return x.ServiceLevelObjectives - } - return nil -} - -func (x *ListServiceLevelObjectivesResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The `UpdateServiceLevelObjective` request. -type UpdateServiceLevelObjectiveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The `ServiceLevelObjective` to draw updates from. - // The given `name` specifies the resource to update. - ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` - // A set of field paths defining which fields to use for the update. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateServiceLevelObjectiveRequest) Reset() { - *x = UpdateServiceLevelObjectiveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateServiceLevelObjectiveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {} - -func (x *UpdateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{10} -} - -func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { - if x != nil { - return x.ServiceLevelObjective - } - return nil -} - -func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// The `DeleteServiceLevelObjective` request. -type DeleteServiceLevelObjectiveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Resource name of the `ServiceLevelObjective` to delete. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteServiceLevelObjectiveRequest) Reset() { - *x = DeleteServiceLevelObjectiveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteServiceLevelObjectiveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {} - -func (x *DeleteServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{11} -} - -func (x *DeleteServiceLevelObjectiveRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -var File_google_monitoring_v3_service_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_service_service_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x22, 0x52, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 
0x21, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0x91, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, - 0x61, 0x73, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8e, 0x02, 0x0a, 0x22, 0x43, - 0x72, 
0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x49, - 0x64, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x1f, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x04, - 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, - 0x65, 0x77, 0x22, 0x80, 0x02, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, - 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x1b, 
0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, - 0x44, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, - 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x18, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x16, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xcb, 0x01, 0x0a, 0x22, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x3b, 0x0a, 0x0b, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x22, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xea, 0x0f, 0x0a, - 0x18, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0d, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x22, 0x19, - 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x3a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xda, 0x41, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x32, 0x21, 0x2f, 0x76, - 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xda, 0x41, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x7d, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, - 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0xfa, 0x01, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4d, - 0x22, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, - 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x73, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0xda, 0x41, 0x1e, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0xc1, - 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 
0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, - 0x41, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0xd4, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x73, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, - 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, - 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x8c, 0x02, 0x0a, 0x1b, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x22, 0x85, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x65, 0x32, 0x4a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0xda, 0x41, - 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x34, 0x2a, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, - 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0xa9, 0x01, - 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd5, 0x01, 0x0a, 0x18, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, - 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, - 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_service_service_proto_rawDescOnce sync.Once - 
file_google_monitoring_v3_service_service_proto_rawDescData = file_google_monitoring_v3_service_service_proto_rawDesc -) - -func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_service_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_service_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_service_proto_rawDescData) - }) - return file_google_monitoring_v3_service_service_proto_rawDescData -} - -var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_google_monitoring_v3_service_service_proto_goTypes = []interface{}{ - (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest - (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest - (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest - (*ListServicesResponse)(nil), // 3: google.monitoring.v3.ListServicesResponse - (*UpdateServiceRequest)(nil), // 4: google.monitoring.v3.UpdateServiceRequest - (*DeleteServiceRequest)(nil), // 5: google.monitoring.v3.DeleteServiceRequest - (*CreateServiceLevelObjectiveRequest)(nil), // 6: google.monitoring.v3.CreateServiceLevelObjectiveRequest - (*GetServiceLevelObjectiveRequest)(nil), // 7: google.monitoring.v3.GetServiceLevelObjectiveRequest - (*ListServiceLevelObjectivesRequest)(nil), // 8: google.monitoring.v3.ListServiceLevelObjectivesRequest - (*ListServiceLevelObjectivesResponse)(nil), // 9: google.monitoring.v3.ListServiceLevelObjectivesResponse - (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest - (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest - (*Service)(nil), // 12: google.monitoring.v3.Service - (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask - (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective - (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View - (*emptypb.Empty)(nil), // 16: google.protobuf.Empty -} -var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{ - 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service - 12, // 1: google.monitoring.v3.ListServicesResponse.services:type_name -> google.monitoring.v3.Service - 12, // 2: google.monitoring.v3.UpdateServiceRequest.service:type_name -> google.monitoring.v3.Service - 13, // 3: google.monitoring.v3.UpdateServiceRequest.update_mask:type_name -> google.protobuf.FieldMask - 14, // 4: google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective - 15, // 5: google.monitoring.v3.GetServiceLevelObjectiveRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View - 15, // 6: google.monitoring.v3.ListServiceLevelObjectivesRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View - 14, // 7: google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives:type_name -> google.monitoring.v3.ServiceLevelObjective - 14, // 8: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective - 13, // 9: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask:type_name -> google.protobuf.FieldMask - 0, // 10: google.monitoring.v3.ServiceMonitoringService.CreateService:input_type -> 
google.monitoring.v3.CreateServiceRequest - 1, // 11: google.monitoring.v3.ServiceMonitoringService.GetService:input_type -> google.monitoring.v3.GetServiceRequest - 2, // 12: google.monitoring.v3.ServiceMonitoringService.ListServices:input_type -> google.monitoring.v3.ListServicesRequest - 4, // 13: google.monitoring.v3.ServiceMonitoringService.UpdateService:input_type -> google.monitoring.v3.UpdateServiceRequest - 5, // 14: google.monitoring.v3.ServiceMonitoringService.DeleteService:input_type -> google.monitoring.v3.DeleteServiceRequest - 6, // 15: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:input_type -> google.monitoring.v3.CreateServiceLevelObjectiveRequest - 7, // 16: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:input_type -> google.monitoring.v3.GetServiceLevelObjectiveRequest - 8, // 17: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:input_type -> google.monitoring.v3.ListServiceLevelObjectivesRequest - 10, // 18: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:input_type -> google.monitoring.v3.UpdateServiceLevelObjectiveRequest - 11, // 19: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:input_type -> google.monitoring.v3.DeleteServiceLevelObjectiveRequest - 12, // 20: google.monitoring.v3.ServiceMonitoringService.CreateService:output_type -> google.monitoring.v3.Service - 12, // 21: google.monitoring.v3.ServiceMonitoringService.GetService:output_type -> google.monitoring.v3.Service - 3, // 22: google.monitoring.v3.ServiceMonitoringService.ListServices:output_type -> google.monitoring.v3.ListServicesResponse - 12, // 23: google.monitoring.v3.ServiceMonitoringService.UpdateService:output_type -> google.monitoring.v3.Service - 16, // 24: google.monitoring.v3.ServiceMonitoringService.DeleteService:output_type -> google.protobuf.Empty - 14, // 25: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective - 14, // 26: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective - 9, // 27: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:output_type -> google.monitoring.v3.ListServiceLevelObjectivesResponse - 14, // 28: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective - 16, // 29: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:output_type -> google.protobuf.Empty - 20, // [20:30] is the sub-list for method output_type - 10, // [10:20] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_service_service_proto_init() } -func file_google_monitoring_v3_service_service_proto_init() { - if File_google_monitoring_v3_service_service_proto != nil { - return - } - file_google_monitoring_v3_service_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServicesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServicesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateServiceLevelObjectiveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceLevelObjectiveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServiceLevelObjectivesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServiceLevelObjectivesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateServiceLevelObjectiveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteServiceLevelObjectiveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_service_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 12, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_monitoring_v3_service_service_proto_goTypes, - DependencyIndexes: 
file_google_monitoring_v3_service_service_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_service_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_service_service_proto = out.File - file_google_monitoring_v3_service_service_proto_rawDesc = nil - file_google_monitoring_v3_service_service_proto_goTypes = nil - file_google_monitoring_v3_service_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ServiceMonitoringServiceClient interface { - // Create a `Service`. - CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) - // Get the named `Service`. - GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) - // List `Service`s for this workspace. - ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) - // Update this `Service`. - UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) - // Soft delete this `Service`. - DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Create a `ServiceLevelObjective` for the given `Service`. - CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) - // Get a `ServiceLevelObjective` by name. - GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) - // List the `ServiceLevelObjective`s for the given `Service`. - ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) - // Update the given `ServiceLevelObjective`. - UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) - // Delete the given `ServiceLevelObjective`. - DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type serviceMonitoringServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient { - return &serviceMonitoringServiceClient{cc} -} - -func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) { - out := new(Service) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) { - out := new(Service) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { - out := new(ListServicesResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) { - out := new(Service) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { - out := new(ServiceLevelObjective) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { - out := new(ServiceLevelObjective) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) { - out := new(ListServiceLevelObjectivesResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { - out := new(ServiceLevelObjective) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service. -type ServiceMonitoringServiceServer interface { - // Create a `Service`. - CreateService(context.Context, *CreateServiceRequest) (*Service, error) - // Get the named `Service`. - GetService(context.Context, *GetServiceRequest) (*Service, error) - // List `Service`s for this workspace. 
- ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) - // Update this `Service`. - UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) - // Soft delete this `Service`. - DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) - // Create a `ServiceLevelObjective` for the given `Service`. - CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) - // Get a `ServiceLevelObjective` by name. - GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) - // List the `ServiceLevelObjective`s for the given `Service`. - ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) - // Update the given `ServiceLevelObjective`. - UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) - // Delete the given `ServiceLevelObjective`. - DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) -} - -// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations. -type UnimplementedServiceMonitoringServiceServer struct { -} - -func (*UnimplementedServiceMonitoringServiceServer) CreateService(context.Context, *CreateServiceRequest) (*Service, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) GetService(context.Context, *GetServiceRequest) (*Service, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateServiceLevelObjective not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented") -} -func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented") -} -func 
(*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented") -} - -func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) { - s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv) -} - -func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).CreateService(ctx, req.(*CreateServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).GetService(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListServicesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateService", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteServiceRequest) - 
if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateServiceLevelObjectiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServiceLevelObjectiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListServiceLevelObjectivesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateServiceLevelObjectiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", - } - handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteServiceLevelObjectiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.monitoring.v3.ServiceMonitoringService", - HandlerType: (*ServiceMonitoringServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateService", - Handler: _ServiceMonitoringService_CreateService_Handler, - }, - { - MethodName: "GetService", - Handler: _ServiceMonitoringService_GetService_Handler, - }, - { - MethodName: "ListServices", - Handler: _ServiceMonitoringService_ListServices_Handler, - }, - { - MethodName: "UpdateService", - Handler: _ServiceMonitoringService_UpdateService_Handler, - }, - { - MethodName: "DeleteService", - Handler: _ServiceMonitoringService_DeleteService_Handler, - }, - { - MethodName: "CreateServiceLevelObjective", - Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler, - }, - { - MethodName: "GetServiceLevelObjective", - Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler, - }, - { - MethodName: "ListServiceLevelObjectives", - Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler, - }, - { - MethodName: "UpdateServiceLevelObjective", - Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler, - }, - { - MethodName: "DeleteServiceLevelObjective", - Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/monitoring/v3/service_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go deleted file mode 100644 index 0b3c3c3020..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/span_context.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The context of a span, attached to -// [Exemplars][google.api.Distribution.Exemplars] -// in [Distribution][google.api.Distribution] values during aggregation. -// -// It contains the name of a span with format: -// -// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] -type SpanContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The resource name of the span. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] - // - // `[TRACE_ID]` is a unique identifier for a trace within a project; - // it is a 32-character hexadecimal encoding of a 16-byte array. - // - // `[SPAN_ID]` is a unique identifier for a span within a trace; it - // is a 16-character hexadecimal encoding of an 8-byte array. - SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` -} - -func (x *SpanContext) Reset() { - *x = SpanContext{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SpanContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SpanContext) ProtoMessage() {} - -func (x *SpanContext) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead. 
-func (*SpanContext) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0} -} - -func (x *SpanContext) GetSpanName() string { - if x != nil { - return x.SpanName - } - return "" -} - -var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{ - 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22, - 0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xc8, 0x01, 0x0a, 0x18, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, - 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, - 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once - file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc -) - -func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData) - }) - return file_google_monitoring_v3_span_context_proto_rawDescData -} - -var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_span_context_proto_goTypes = []interface{}{ - (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext -} -var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_span_context_proto_init() } -func 
file_google_monitoring_v3_span_context_proto_init() { - if File_google_monitoring_v3_span_context_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SpanContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_span_context_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes, - }.Build() - File_google_monitoring_v3_span_context_proto = out.File - file_google_monitoring_v3_span_context_proto_rawDesc = nil - file_google_monitoring_v3_span_context_proto_goTypes = nil - file_google_monitoring_v3_span_context_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go deleted file mode 100644 index d09c926e0b..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go +++ /dev/null @@ -1,1627 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/uptime.proto - -package monitoring - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The regions from which an Uptime check can be run. -type UptimeCheckRegion int32 - -const ( - // Default value if no region is specified. Will result in Uptime checks - // running from all regions. - UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 - // Allows checks to run from locations within the United States of America. 
- UptimeCheckRegion_USA UptimeCheckRegion = 1 - // Allows checks to run from locations within the continent of Europe. - UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 - // Allows checks to run from locations within the continent of South - // America. - UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 - // Allows checks to run from locations within the Asia Pacific area (ex: - // Singapore). - UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 -) - -// Enum value maps for UptimeCheckRegion. -var ( - UptimeCheckRegion_name = map[int32]string{ - 0: "REGION_UNSPECIFIED", - 1: "USA", - 2: "EUROPE", - 3: "SOUTH_AMERICA", - 4: "ASIA_PACIFIC", - } - UptimeCheckRegion_value = map[string]int32{ - "REGION_UNSPECIFIED": 0, - "USA": 1, - "EUROPE": 2, - "SOUTH_AMERICA": 3, - "ASIA_PACIFIC": 4, - } -) - -func (x UptimeCheckRegion) Enum() *UptimeCheckRegion { - p := new(UptimeCheckRegion) - *p = x - return p -} - -func (x UptimeCheckRegion) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (UptimeCheckRegion) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[0].Descriptor() -} - -func (UptimeCheckRegion) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[0] -} - -func (x UptimeCheckRegion) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use UptimeCheckRegion.Descriptor instead. -func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} -} - -// The supported resource types that can be used as values of -// `group_resource.resource_type`. -// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. -// The resource types `gae_app` and `uptime_url` are not valid here because -// group checks on App Engine modules and URLs are not allowed. -type GroupResourceType int32 - -const ( - // Default value (not valid). - GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 - // A group of instances from Google Cloud Platform (GCP) or - // Amazon Web Services (AWS). - GroupResourceType_INSTANCE GroupResourceType = 1 - // A group of Amazon ELB load balancers. - GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 -) - -// Enum value maps for GroupResourceType. -var ( - GroupResourceType_name = map[int32]string{ - 0: "RESOURCE_TYPE_UNSPECIFIED", - 1: "INSTANCE", - 2: "AWS_ELB_LOAD_BALANCER", - } - GroupResourceType_value = map[string]int32{ - "RESOURCE_TYPE_UNSPECIFIED": 0, - "INSTANCE": 1, - "AWS_ELB_LOAD_BALANCER": 2, - } -) - -func (x GroupResourceType) Enum() *GroupResourceType { - p := new(GroupResourceType) - *p = x - return p -} - -func (x GroupResourceType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GroupResourceType) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[1].Descriptor() -} - -func (GroupResourceType) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[1] -} - -func (x GroupResourceType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GroupResourceType.Descriptor instead. -func (GroupResourceType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} -} - -// Operational states for an internal checker. 
-type InternalChecker_State int32 - -const ( - // An internal checker should never be in the unspecified state. - InternalChecker_UNSPECIFIED InternalChecker_State = 0 - // The checker is being created, provisioned, and configured. A checker in - // this state can be returned by `ListInternalCheckers` or - // `GetInternalChecker`, as well as by examining the [long running - // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) - // that created it. - InternalChecker_CREATING InternalChecker_State = 1 - // The checker is running and available for use. A checker in this state - // can be returned by `ListInternalCheckers` or `GetInternalChecker` as - // well as by examining the [long running - // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) - // that created it. - // If a checker is being torn down, it is neither visible nor usable, so - // there is no "deleting" or "down" state. - InternalChecker_RUNNING InternalChecker_State = 2 -) - -// Enum value maps for InternalChecker_State. -var ( - InternalChecker_State_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "CREATING", - 2: "RUNNING", - } - InternalChecker_State_value = map[string]int32{ - "UNSPECIFIED": 0, - "CREATING": 1, - "RUNNING": 2, - } -) - -func (x InternalChecker_State) Enum() *InternalChecker_State { - p := new(InternalChecker_State) - *p = x - return p -} - -func (x InternalChecker_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (InternalChecker_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[2].Descriptor() -} - -func (InternalChecker_State) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[2] -} - -func (x InternalChecker_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use InternalChecker_State.Descriptor instead. -func (InternalChecker_State) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0, 0} -} - -// The HTTP request method options. -type UptimeCheckConfig_HttpCheck_RequestMethod int32 - -const ( - // No request method specified. - UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED UptimeCheckConfig_HttpCheck_RequestMethod = 0 - // GET request. - UptimeCheckConfig_HttpCheck_GET UptimeCheckConfig_HttpCheck_RequestMethod = 1 - // POST request. - UptimeCheckConfig_HttpCheck_POST UptimeCheckConfig_HttpCheck_RequestMethod = 2 -) - -// Enum value maps for UptimeCheckConfig_HttpCheck_RequestMethod. 
-var ( - UptimeCheckConfig_HttpCheck_RequestMethod_name = map[int32]string{ - 0: "METHOD_UNSPECIFIED", - 1: "GET", - 2: "POST", - } - UptimeCheckConfig_HttpCheck_RequestMethod_value = map[string]int32{ - "METHOD_UNSPECIFIED": 0, - "GET": 1, - "POST": 2, - } -) - -func (x UptimeCheckConfig_HttpCheck_RequestMethod) Enum() *UptimeCheckConfig_HttpCheck_RequestMethod { - p := new(UptimeCheckConfig_HttpCheck_RequestMethod) - *p = x - return p -} - -func (x UptimeCheckConfig_HttpCheck_RequestMethod) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (UptimeCheckConfig_HttpCheck_RequestMethod) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[3].Descriptor() -} - -func (UptimeCheckConfig_HttpCheck_RequestMethod) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[3] -} - -func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead. -func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 1, 0} -} - -// Header options corresponding to the Content-Type of the body in HTTP -// requests. Note that a `Content-Type` header cannot be present in the -// `headers` field if this field is specified. -type UptimeCheckConfig_HttpCheck_ContentType int32 - -const ( - // No content type specified. If the request method is POST, an - // unspecified content type results in a check creation rejection. - UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ContentType = 0 - // `body` is in URL-encoded form. Equivalent to setting the `Content-Type` - // to `application/x-www-form-urlencoded` in the HTTP request. - UptimeCheckConfig_HttpCheck_URL_ENCODED UptimeCheckConfig_HttpCheck_ContentType = 1 -) - -// Enum value maps for UptimeCheckConfig_HttpCheck_ContentType. -var ( - UptimeCheckConfig_HttpCheck_ContentType_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "URL_ENCODED", - } - UptimeCheckConfig_HttpCheck_ContentType_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "URL_ENCODED": 1, - } -) - -func (x UptimeCheckConfig_HttpCheck_ContentType) Enum() *UptimeCheckConfig_HttpCheck_ContentType { - p := new(UptimeCheckConfig_HttpCheck_ContentType) - *p = x - return p -} - -func (x UptimeCheckConfig_HttpCheck_ContentType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (UptimeCheckConfig_HttpCheck_ContentType) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[4].Descriptor() -} - -func (UptimeCheckConfig_HttpCheck_ContentType) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[4] -} - -func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead. -func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 1, 1} -} - -// Options to perform content matching. 
-type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32 - -const ( - // No content matcher type specified (maintained for backward - // compatibility, but deprecated for future use). - // Treated as `CONTAINS_STRING`. - UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0 - // Selects substring matching. The match succeeds if the output contains - // the `content` string. This is the default value for checks without - // a `matcher` option, or where the value of `matcher` is - // `CONTENT_MATCHER_OPTION_UNSPECIFIED`. - UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1 - // Selects negation of substring matching. The match succeeds if the - // output does _NOT_ contain the `content` string. - UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2 - // Selects regular-expression matching. The match succeeds of the output - // matches the regular expression specified in the `content` string. - UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3 - // Selects negation of regular-expression matching. The match succeeds if - // the output does _NOT_ match the regular expression specified in the - // `content` string. - UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4 -) - -// Enum value maps for UptimeCheckConfig_ContentMatcher_ContentMatcherOption. -var ( - UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{ - 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED", - 1: "CONTAINS_STRING", - 2: "NOT_CONTAINS_STRING", - 3: "MATCHES_REGEX", - 4: "NOT_MATCHES_REGEX", - } - UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{ - "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0, - "CONTAINS_STRING": 1, - "NOT_CONTAINS_STRING": 2, - "MATCHES_REGEX": 3, - "NOT_MATCHES_REGEX": 4, - } -) - -func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_ContentMatcherOption { - p := new(UptimeCheckConfig_ContentMatcher_ContentMatcherOption) - *p = x - return p -} - -func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[5].Descriptor() -} - -func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[5] -} - -func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead. -func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 3, 0} -} - -// An internal checker allows Uptime checks to run on private/internal GCP -// resources. -// -// Deprecated: Do not use. -type InternalChecker struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A unique resource name for this InternalChecker. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] - // - // `[PROJECT_ID_OR_NUMBER]` is the Stackdriver Workspace project for the - // Uptime check config associated with the internal checker. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The checker's human-readable name. The display name - // should be unique within a Stackdriver Workspace in order to make it easier - // to identify; however, uniqueness is not enforced. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the - // internal resource lives (ex: "default"). - Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` - // The GCP zone the Uptime check should egress from. Only respected for - // internal Uptime checks, where internal_network is specified. - GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"` - // The GCP project ID where the internal checker lives. Not necessary - // the same as the Workspace project. - PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"` - // The current operational state of the internal checker. - State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"` -} - -func (x *InternalChecker) Reset() { - *x = InternalChecker{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InternalChecker) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InternalChecker) ProtoMessage() {} - -func (x *InternalChecker) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InternalChecker.ProtoReflect.Descriptor instead. -func (*InternalChecker) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} -} - -func (x *InternalChecker) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *InternalChecker) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *InternalChecker) GetNetwork() string { - if x != nil { - return x.Network - } - return "" -} - -func (x *InternalChecker) GetGcpZone() string { - if x != nil { - return x.GcpZone - } - return "" -} - -func (x *InternalChecker) GetPeerProjectId() string { - if x != nil { - return x.PeerProjectId - } - return "" -} - -func (x *InternalChecker) GetState() InternalChecker_State { - if x != nil { - return x.State - } - return InternalChecker_UNSPECIFIED -} - -// This message configures which resources and services to monitor for -// availability. -type UptimeCheckConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A unique resource name for this Uptime check configuration. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - // - // This field should be omitted when creating the Uptime check configuration; - // on create, the resource name is assigned by the server and included in the - // response. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A human-friendly name for the Uptime check configuration. The display name - // should be unique within a Stackdriver Workspace in order to make it easier - // to identify; however, uniqueness is not enforced. Required. - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - // The resource the check is checking. Required. - // - // Types that are assignable to Resource: - // *UptimeCheckConfig_MonitoredResource - // *UptimeCheckConfig_ResourceGroup_ - Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` - // The type of Uptime check request. - // - // Types that are assignable to CheckRequestType: - // *UptimeCheckConfig_HttpCheck_ - // *UptimeCheckConfig_TcpCheck_ - CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` - // How often, in seconds, the Uptime check is performed. - // Currently, the only supported values are `60s` (1 minute), `300s` - // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, - // defaults to `60s`. - Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` - // The maximum amount of time to wait for the request to complete (must be - // between 1 and 60 seconds). Required. - Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` - // The content that is expected to appear in the data returned by the target - // server against which the check is run. Currently, only the first entry - // in the `content_matchers` list is supported, and additional entries will - // be ignored. This field is optional and should only be specified if a - // content match is required as part of the/ Uptime check. - ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` - // The list of regions from which the check will be run. - // Some regions contain one location, and others contain more than one. - // If this field is specified, enough regions must be provided to include a - // minimum of 3 locations. Not specifying this field will result in Uptime - // checks running from all available regions. - SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` - // If this is `true`, then checks are made only from the 'internal_checkers'. - // If it is `false`, then checks are made only from the 'selected_regions'. - // It is an error to provide 'selected_regions' when is_internal is `true`, - // or to provide 'internal_checkers' when is_internal is `false`. - // - // Deprecated: Do not use. - IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` - // The internal checkers that this check will egress from. If `is_internal` is - // `true` and this list is empty, the check will egress from all the - // InternalCheckers configured for the project that owns this - // `UptimeCheckConfig`. - // - // Deprecated: Do not use. 
- InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` -} - -func (x *UptimeCheckConfig) Reset() { - *x = UptimeCheckConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckConfig) ProtoMessage() {} - -func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead. -func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} -} - -func (x *UptimeCheckConfig) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UptimeCheckConfig) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (x *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { - if x, ok := x.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { - return x.MonitoredResource - } - return nil -} - -func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { - if x, ok := x.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { - return x.ResourceGroup - } - return nil -} - -func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { - if m != nil { - return m.CheckRequestType - } - return nil -} - -func (x *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { - if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { - return x.HttpCheck - } - return nil -} - -func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { - if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { - return x.TcpCheck - } - return nil -} - -func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration { - if x != nil { - return x.Period - } - return nil -} - -func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -func (x *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { - if x != nil { - return x.ContentMatchers - } - return nil -} - -func (x *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { - if x != nil { - return x.SelectedRegions - } - return nil -} - -// Deprecated: Do not use. -func (x *UptimeCheckConfig) GetIsInternal() bool { - if x != nil { - return x.IsInternal - } - return false -} - -// Deprecated: Do not use. -func (x *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { - if x != nil { - return x.InternalCheckers - } - return nil -} - -type isUptimeCheckConfig_Resource interface { - isUptimeCheckConfig_Resource() -} - -type UptimeCheckConfig_MonitoredResource struct { - // The [monitored - // resource](https://cloud.google.com/monitoring/api/resources) associated - // with the configuration. 
- // The following monitored resource types are supported for Uptime checks: - // `uptime_url`, - // `gce_instance`, - // `gae_app`, - // `aws_ec2_instance`, - // `aws_elb_load_balancer` - MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` -} - -type UptimeCheckConfig_ResourceGroup_ struct { - // The group resource associated with the configuration. - ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` -} - -func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} - -func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} - -type isUptimeCheckConfig_CheckRequestType interface { - isUptimeCheckConfig_CheckRequestType() -} - -type UptimeCheckConfig_HttpCheck_ struct { - // Contains information needed to make an HTTP or HTTPS check. - HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"` -} - -type UptimeCheckConfig_TcpCheck_ struct { - // Contains information needed to make a TCP check. - TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"` -} - -func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} - -func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} - -// Contains the region, location, and list of IP -// addresses where checkers in the location run from. -type UptimeCheckIp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A broad region category in which the IP address is located. - Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` - // A more specific location within the region that typically encodes - // a particular city/town/metro (and its containing state/province or country) - // within the broader umbrella region category. - Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` - // The IP address from which the Uptime check originates. This is a fully - // specified IP address (not an IP address range). Most IP addresses, as of - // this publication, are in IPv4 format; however, one should not rely on the - // IP addresses being in IPv4 format indefinitely, and should support - // interpreting this field in either IPv4 or IPv6 format. - IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` -} - -func (x *UptimeCheckIp) Reset() { - *x = UptimeCheckIp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckIp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckIp) ProtoMessage() {} - -func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead. 
-func (*UptimeCheckIp) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} -} - -func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion { - if x != nil { - return x.Region - } - return UptimeCheckRegion_REGION_UNSPECIFIED -} - -func (x *UptimeCheckIp) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *UptimeCheckIp) GetIpAddress() string { - if x != nil { - return x.IpAddress - } - return "" -} - -// The resource submessage for group checks. It can be used instead of a -// monitored resource, when multiple resources are being monitored. -type UptimeCheckConfig_ResourceGroup struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The group of resources being monitored. Should be only the `[GROUP_ID]`, - // and not the full-path - // `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`. - GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` - // The resource type of the group members. - ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"` -} - -func (x *UptimeCheckConfig_ResourceGroup) Reset() { - *x = UptimeCheckConfig_ResourceGroup{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckConfig_ResourceGroup) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} - -func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead. -func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} -} - -func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string { - if x != nil { - return x.GroupId - } - return "" -} - -func (x *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType { - if x != nil { - return x.ResourceType - } - return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED -} - -// Information involved in an HTTP/HTTPS Uptime check request. -type UptimeCheckConfig_HttpCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The HTTP request method to use for the check. - RequestMethod UptimeCheckConfig_HttpCheck_RequestMethod `protobuf:"varint,8,opt,name=request_method,json=requestMethod,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_RequestMethod" json:"request_method,omitempty"` - // If `true`, use HTTPS instead of HTTP to run the check. - UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` - // Optional (defaults to "/"). The path to the page against which to run - // the check. Will be combined with the `host` (specified within the - // `monitored_resource`) and `port` to construct the full URL. 
If the - // provided path does not begin with "/", a "/" will be prepended - // automatically. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when - // `use_ssl` is `true`). The TCP port on the HTTP server against which to - // run the check. Will be combined with host (specified within the - // `monitored_resource`) and `path` to construct the full URL. - Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` - // The authentication information. Optional when creating an HTTP check; - // defaults to empty. - AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` - // Boolean specifiying whether to encrypt the header information. - // Encryption should be specified for any headers related to authentication - // that you do not wish to be seen when retrieving the configuration. The - // server will be responsible for encrypting the headers. - // On Get/List calls, if `mask_headers` is set to `true` then the headers - // will be obscured with `******.` - MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"` - // The list of headers to send as part of the Uptime check request. - // If two headers have the same key and different values, they should - // be entered as a single header, with the value being a comma-separated - // list of all the desired values as described at - // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). - // Entering two separate headers with the same key in a Create call will - // cause the first to be overwritten by the second. - // The maximum number of headers allowed is 100. - Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The content type to use for the check. - ContentType UptimeCheckConfig_HttpCheck_ContentType `protobuf:"varint,9,opt,name=content_type,json=contentType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ContentType" json:"content_type,omitempty"` - // Boolean specifying whether to include SSL certificate validation as a - // part of the Uptime check. Only applies to checks where - // `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`, - // setting `validate_ssl` to `true` has no effect. - ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"` - // The request body associated with the HTTP request. If `content_type` is - // `URL_ENCODED`, the body passed in must be URL-encoded. Users can provide - // a `Content-Length` header via the `headers` field or the API will do - // so. The maximum byte size is 1 megabyte. Note: As with all `bytes` fields - // JSON representations are base64 encoded. 
- Body []byte `protobuf:"bytes,10,opt,name=body,proto3" json:"body,omitempty"` -} - -func (x *UptimeCheckConfig_HttpCheck) Reset() { - *x = UptimeCheckConfig_HttpCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckConfig_HttpCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} - -func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead. -func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 1} -} - -func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod { - if x != nil { - return x.RequestMethod - } - return UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED -} - -func (x *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { - if x != nil { - return x.UseSsl - } - return false -} - -func (x *UptimeCheckConfig_HttpCheck) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *UptimeCheckConfig_HttpCheck) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { - if x != nil { - return x.AuthInfo - } - return nil -} - -func (x *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { - if x != nil { - return x.MaskHeaders - } - return false -} - -func (x *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { - if x != nil { - return x.Headers - } - return nil -} - -func (x *UptimeCheckConfig_HttpCheck) GetContentType() UptimeCheckConfig_HttpCheck_ContentType { - if x != nil { - return x.ContentType - } - return UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED -} - -func (x *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool { - if x != nil { - return x.ValidateSsl - } - return false -} - -func (x *UptimeCheckConfig_HttpCheck) GetBody() []byte { - if x != nil { - return x.Body - } - return nil -} - -// Information required for a TCP Uptime check request. -type UptimeCheckConfig_TcpCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The TCP port on the server against which to run the check. Will be - // combined with host (specified within the `monitored_resource`) to - // construct the full URL. Required. 
- Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` -} - -func (x *UptimeCheckConfig_TcpCheck) Reset() { - *x = UptimeCheckConfig_TcpCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckConfig_TcpCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} - -func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead. -func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2} -} - -func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -// Optional. Used to perform content matching. This allows matching based on -// substrings and regular expressions, together with their negations. Only the -// first 4 MB of an HTTP or HTTPS check's response (and the first -// 1 MB of a TCP check's response) are examined for purposes of content -// matching. -type UptimeCheckConfig_ContentMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // String or regex content to match. Maximum 1024 bytes. An empty `content` - // string indicates no content matching is to be performed. - Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` - // The type of content matcher that will be applied to the server output, - // compared to the `content` string when the check is run. - Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"` -} - -func (x *UptimeCheckConfig_ContentMatcher) Reset() { - *x = UptimeCheckConfig_ContentMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckConfig_ContentMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} - -func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead. 
-func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 3} -} - -func (x *UptimeCheckConfig_ContentMatcher) GetContent() string { - if x != nil { - return x.Content - } - return "" -} - -func (x *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption { - if x != nil { - return x.Matcher - } - return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED -} - -// The authentication parameters to provide to the specified resource or -// URL that requires a username and password. Currently, only -// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is -// supported in Uptime checks. -type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The username to use when authenticating with the HTTP server. - Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` - // The password to use when authenticating with the HTTP server. - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { - *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} - -func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead. 
-func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 1, 0} -} - -func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -var File_google_monitoring_v3_uptime_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x49, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x19, - 0x0a, 0x08, 0x67, 0x63, 0x70, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x67, 0x63, 0x70, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x65, 0x65, - 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, - 0x64, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, - 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, - 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc8, 0x12, - 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, - 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, - 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 
0x72, 0x73, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, - 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xab, 0x06, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x12, 0x66, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, - 0x73, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, - 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 
0x70, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, - 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, - 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, - 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x34, 0x0a, - 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, - 0x44, 0x10, 0x01, 0x1a, 0x1e, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 
0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x1a, 0xaa, 0x02, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x22, 0x96, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x26, 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, - 0x48, 0x45, 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, 0x54, - 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, - 0x52, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, - 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x54, - 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x04, - 0x3a, 0xf3, 0x01, 0xea, 0x41, 0xef, 0x01, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x39, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, - 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x42, 0x14, 
0x0a, 0x12, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x12, 0x3f, 0x0a, 0x06, 0x72, 0x65, - 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, - 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x65, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x52, - 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x53, 0x41, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, - 0x45, 0x55, 0x52, 0x4f, 0x50, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x4f, 0x55, 0x54, - 0x48, 0x5f, 0x41, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x41, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x41, - 0x53, 0x49, 0x41, 0x5f, 0x50, 0x41, 0x43, 0x49, 0x46, 0x49, 0x43, 0x10, 0x04, 0x2a, 0x5b, 0x0a, - 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x12, - 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, 0x4c, 0x4f, 0x41, 0x44, 0x5f, - 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, 0xc3, 0x01, 0x0a, 0x18, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, - 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_uptime_proto_rawDescOnce sync.Once - 
file_google_monitoring_v3_uptime_proto_rawDescData = file_google_monitoring_v3_uptime_proto_rawDesc -) - -func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_uptime_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_uptime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_proto_rawDescData) - }) - return file_google_monitoring_v3_uptime_proto_rawDescData -} - -var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_monitoring_v3_uptime_proto_goTypes = []interface{}{ - (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion - (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType - (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State - (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 3: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod - (UptimeCheckConfig_HttpCheck_ContentType)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType - (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 5: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption - (*InternalChecker)(nil), // 6: google.monitoring.v3.InternalChecker - (*UptimeCheckConfig)(nil), // 7: google.monitoring.v3.UptimeCheckConfig - (*UptimeCheckIp)(nil), // 8: google.monitoring.v3.UptimeCheckIp - (*UptimeCheckConfig_ResourceGroup)(nil), // 9: google.monitoring.v3.UptimeCheckConfig.ResourceGroup - (*UptimeCheckConfig_HttpCheck)(nil), // 10: google.monitoring.v3.UptimeCheckConfig.HttpCheck - (*UptimeCheckConfig_TcpCheck)(nil), // 11: google.monitoring.v3.UptimeCheckConfig.TcpCheck - (*UptimeCheckConfig_ContentMatcher)(nil), // 12: google.monitoring.v3.UptimeCheckConfig.ContentMatcher - (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 13: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication - nil, // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry - (*monitoredres.MonitoredResource)(nil), // 15: google.api.MonitoredResource - (*durationpb.Duration)(nil), // 16: google.protobuf.Duration -} -var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ - 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State - 15, // 1: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource - 9, // 2: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup - 10, // 3: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck - 11, // 4: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck - 16, // 5: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration - 16, // 6: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration - 12, // 7: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher - 0, // 8: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion - 6, // 9: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker - 0, // 10: google.monitoring.v3.UptimeCheckIp.region:type_name -> 
google.monitoring.v3.UptimeCheckRegion - 1, // 11: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType - 3, // 12: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod - 13, // 13: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication - 14, // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry - 4, // 15: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType - 5, // 16: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption - 17, // [17:17] is the sub-list for method output_type - 17, // [17:17] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name -} - -func init() { file_google_monitoring_v3_uptime_proto_init() } -func file_google_monitoring_v3_uptime_proto_init() { - if File_google_monitoring_v3_uptime_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InternalChecker); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UptimeCheckConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UptimeCheckIp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UptimeCheckConfig_ResourceGroup); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UptimeCheckConfig_HttpCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UptimeCheckConfig_TcpCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UptimeCheckConfig_ContentMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*UptimeCheckConfig_HttpCheck_BasicAuthentication); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*UptimeCheckConfig_MonitoredResource)(nil), - (*UptimeCheckConfig_ResourceGroup_)(nil), - (*UptimeCheckConfig_HttpCheck_)(nil), - (*UptimeCheckConfig_TcpCheck_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc, - NumEnums: 6, - NumMessages: 9, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_monitoring_v3_uptime_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_uptime_proto_depIdxs, - EnumInfos: file_google_monitoring_v3_uptime_proto_enumTypes, - MessageInfos: file_google_monitoring_v3_uptime_proto_msgTypes, - }.Build() - File_google_monitoring_v3_uptime_proto = out.File - file_google_monitoring_v3_uptime_proto_rawDesc = nil - file_google_monitoring_v3_uptime_proto_goTypes = nil - file_google_monitoring_v3_uptime_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go deleted file mode 100644 index a828a127e3..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go +++ /dev/null @@ -1,1215 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/monitoring/v3/uptime_service.proto - -package monitoring - -import ( - context "context" - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - _ "google.golang.org/protobuf/types/known/durationpb" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The protocol for the `ListUptimeCheckConfigs` request. 
-type ListUptimeCheckConfigsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project whose Uptime check configurations are listed. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of results to return in a single response. The server - // may further constrain the maximum number of results returned in a single - // page. If the page_size is <=0, the server will decide the number of results - // to be returned. - PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return more results from the previous method call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListUptimeCheckConfigsRequest) Reset() { - *x = ListUptimeCheckConfigsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListUptimeCheckConfigsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} - -func (x *ListUptimeCheckConfigsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListUptimeCheckConfigsRequest.ProtoReflect.Descriptor instead. -func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ListUptimeCheckConfigsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListUptimeCheckConfigsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListUptimeCheckConfigsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The protocol for the `ListUptimeCheckConfigs` response. -type ListUptimeCheckConfigsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The returned Uptime check configurations. - UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` - // This field represents the pagination token to retrieve the next page of - // results. If the value is empty, it means no further results for the - // request. To retrieve the next page of results, the value of the - // next_page_token is passed to the subsequent List method call (in the - // request message's page_token field). - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - // The total number of Uptime check configurations for the project, - // irrespective of any pagination. 
- TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` -} - -func (x *ListUptimeCheckConfigsResponse) Reset() { - *x = ListUptimeCheckConfigsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListUptimeCheckConfigsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} - -func (x *ListUptimeCheckConfigsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListUptimeCheckConfigsResponse.ProtoReflect.Descriptor instead. -func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { - if x != nil { - return x.UptimeCheckConfigs - } - return nil -} - -func (x *ListUptimeCheckConfigsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -func (x *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { - if x != nil { - return x.TotalSize - } - return 0 -} - -// The protocol for the `GetUptimeCheckConfig` request. -type GetUptimeCheckConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The Uptime check configuration to retrieve. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetUptimeCheckConfigRequest) Reset() { - *x = GetUptimeCheckConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetUptimeCheckConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetUptimeCheckConfigRequest) ProtoMessage() {} - -func (x *GetUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. -func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{2} -} - -func (x *GetUptimeCheckConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The protocol for the `CreateUptimeCheckConfig` request. -type CreateUptimeCheckConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project in which to create the Uptime check. 
The format is: - // - // projects/[PROJECT_ID_OR_NUMBER] - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The new Uptime check configuration. - UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` -} - -func (x *CreateUptimeCheckConfigRequest) Reset() { - *x = CreateUptimeCheckConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateUptimeCheckConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} - -func (x *CreateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. -func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{3} -} - -func (x *CreateUptimeCheckConfigRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { - if x != nil { - return x.UptimeCheckConfig - } - return nil -} - -// The protocol for the `UpdateUptimeCheckConfig` request. -type UpdateUptimeCheckConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. If present, only the listed fields in the current Uptime check - // configuration are updated with values from the new configuration. If this - // field is empty, then the current configuration is completely replaced with - // the new configuration. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - // Required. If an `updateMask` has been specified, this field gives - // the values for the set of fields mentioned in the `updateMask`. If an - // `updateMask` has not been given, this Uptime check configuration replaces - // the current configuration. If a field is mentioned in `updateMask` but - // the corresonding field is omitted in this partial Uptime check - // configuration, it has the effect of deleting/clearing the field from the - // configuration on the server. - // - // The following fields can be updated: `display_name`, - // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and - // `selected_regions`. 
- UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` -} - -func (x *UpdateUptimeCheckConfigRequest) Reset() { - *x = UpdateUptimeCheckConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateUptimeCheckConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} - -func (x *UpdateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. -func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4} -} - -func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -func (x *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { - if x != nil { - return x.UptimeCheckConfig - } - return nil -} - -// The protocol for the `DeleteUptimeCheckConfig` request. -type DeleteUptimeCheckConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The Uptime check configuration to delete. The format is: - // - // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteUptimeCheckConfigRequest) Reset() { - *x = DeleteUptimeCheckConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteUptimeCheckConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} - -func (x *DeleteUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. -func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{5} -} - -func (x *DeleteUptimeCheckConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The protocol for the `ListUptimeCheckIps` request. -type ListUptimeCheckIpsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The maximum number of results to return in a single response. The server - // may further constrain the maximum number of results returned in a single - // page. 
If the page_size is <=0, the server will decide the number of results - // to be returned. - // NOTE: this field is not yet implemented - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If this field is not empty then it must contain the `nextPageToken` value - // returned by a previous call to this method. Using this field causes the - // method to return more results from the previous method call. - // NOTE: this field is not yet implemented - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListUptimeCheckIpsRequest) Reset() { - *x = ListUptimeCheckIpsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListUptimeCheckIpsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListUptimeCheckIpsRequest) ProtoMessage() {} - -func (x *ListUptimeCheckIpsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListUptimeCheckIpsRequest.ProtoReflect.Descriptor instead. -func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{6} -} - -func (x *ListUptimeCheckIpsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListUptimeCheckIpsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The protocol for the `ListUptimeCheckIps` response. -type ListUptimeCheckIpsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The returned list of IP addresses (including region and location) that the - // checkers run from. - UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` - // This field represents the pagination token to retrieve the next page of - // results. If the value is empty, it means no further results for the - // request. To retrieve the next page of results, the value of the - // next_page_token is passed to the subsequent List method call (in the - // request message's page_token field). 
- // NOTE: this field is not yet implemented - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListUptimeCheckIpsResponse) Reset() { - *x = ListUptimeCheckIpsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListUptimeCheckIpsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListUptimeCheckIpsResponse) ProtoMessage() {} - -func (x *ListUptimeCheckIpsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListUptimeCheckIpsResponse.ProtoReflect.Descriptor instead. -func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{7} -} - -func (x *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { - if x != nil { - return x.UptimeCheckIps - } - return nil -} - -func (x *ListUptimeCheckIpsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -var File_google_monitoring_v3_uptime_service_proto protoreflect.FileDescriptor - -var file_google_monitoring_v3_uptime_service_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x1d, 0x4c, 
0x69, 0x73, 0x74, 0x55, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x14, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, - 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcb, - 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, - 
0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, - 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x5c, 0x0a, 0x13, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x69, 0x0a, 0x1e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, - 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, - 0x10, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x69, 0x70, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x52, 0x0e, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 
0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xbd, 0x0a, 0x0a, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x16, - 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xad, - 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, - 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xde, - 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x41, 0x22, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x13, 0x75, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x75, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0xeb, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x71, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x55, 0x32, 0x3e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xda, 0x41, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa2, 0x01, - 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, - 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 
0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x72, 0x65, 0x61, 0x64, 0x42, 0xca, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x33, 0x42, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, - 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, - 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_monitoring_v3_uptime_service_proto_rawDescOnce sync.Once - file_google_monitoring_v3_uptime_service_proto_rawDescData = file_google_monitoring_v3_uptime_service_proto_rawDesc -) - -func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte { - file_google_monitoring_v3_uptime_service_proto_rawDescOnce.Do(func() { - file_google_monitoring_v3_uptime_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_service_proto_rawDescData) - }) - return file_google_monitoring_v3_uptime_service_proto_rawDescData -} - -var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_monitoring_v3_uptime_service_proto_goTypes = []interface{}{ - (*ListUptimeCheckConfigsRequest)(nil), // 0: 
google.monitoring.v3.ListUptimeCheckConfigsRequest - (*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse - (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest - (*CreateUptimeCheckConfigRequest)(nil), // 3: google.monitoring.v3.CreateUptimeCheckConfigRequest - (*UpdateUptimeCheckConfigRequest)(nil), // 4: google.monitoring.v3.UpdateUptimeCheckConfigRequest - (*DeleteUptimeCheckConfigRequest)(nil), // 5: google.monitoring.v3.DeleteUptimeCheckConfigRequest - (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest - (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse - (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig - (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask - (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp - (*emptypb.Empty)(nil), // 11: google.protobuf.Empty -} -var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{ - 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig - 8, // 1: google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig - 9, // 2: google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask:type_name -> google.protobuf.FieldMask - 8, // 3: google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig - 10, // 4: google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips:type_name -> google.monitoring.v3.UptimeCheckIp - 0, // 5: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:input_type -> google.monitoring.v3.ListUptimeCheckConfigsRequest - 2, // 6: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:input_type -> google.monitoring.v3.GetUptimeCheckConfigRequest - 3, // 7: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:input_type -> google.monitoring.v3.CreateUptimeCheckConfigRequest - 4, // 8: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:input_type -> google.monitoring.v3.UpdateUptimeCheckConfigRequest - 5, // 9: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:input_type -> google.monitoring.v3.DeleteUptimeCheckConfigRequest - 6, // 10: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:input_type -> google.monitoring.v3.ListUptimeCheckIpsRequest - 1, // 11: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:output_type -> google.monitoring.v3.ListUptimeCheckConfigsResponse - 8, // 12: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig - 8, // 13: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig - 8, // 14: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig - 11, // 15: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:output_type -> google.protobuf.Empty - 7, // 16: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:output_type -> google.monitoring.v3.ListUptimeCheckIpsResponse - 11, // [11:17] is the sub-list for method output_type - 5, // [5:11] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the 
sub-list for field type_name -} - -func init() { file_google_monitoring_v3_uptime_service_proto_init() } -func file_google_monitoring_v3_uptime_service_proto_init() { - if File_google_monitoring_v3_uptime_service_proto != nil { - return - } - file_google_monitoring_v3_uptime_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListUptimeCheckConfigsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListUptimeCheckConfigsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetUptimeCheckConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateUptimeCheckConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateUptimeCheckConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteUptimeCheckConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListUptimeCheckIpsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListUptimeCheckIpsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_monitoring_v3_uptime_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_monitoring_v3_uptime_service_proto_goTypes, - DependencyIndexes: file_google_monitoring_v3_uptime_service_proto_depIdxs, - MessageInfos: file_google_monitoring_v3_uptime_service_proto_msgTypes, - }.Build() - File_google_monitoring_v3_uptime_service_proto = out.File - file_google_monitoring_v3_uptime_service_proto_rawDesc = nil - file_google_monitoring_v3_uptime_service_proto_goTypes = nil - file_google_monitoring_v3_uptime_service_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// UptimeCheckServiceClient is the client API for UptimeCheckService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type UptimeCheckServiceClient interface { - // Lists the existing valid Uptime check configurations for the project - // (leaving out any invalid configurations). - ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) - // Gets a single Uptime check configuration. - GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) - // Creates a new Uptime check configuration. - CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) - // Updates an Uptime check configuration. You can either replace the entire - // configuration with a new one or replace only certain fields in the current - // configuration by specifying the fields to be updated via `updateMask`. - // Returns the updated configuration. - UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) - // Deletes an Uptime check configuration. Note that this method will fail - // if the Uptime check configuration is referenced by an alert policy or - // other dependent configs that would be rendered invalid by the deletion. - DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Returns the list of IP addresses that checkers run from - ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) -} - -type uptimeCheckServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient { - return &uptimeCheckServiceClient{cc} -} - -func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { - out := new(ListUptimeCheckConfigsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { - out := new(UptimeCheckConfig) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { - out := new(UptimeCheckConfig) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { - out := new(UptimeCheckConfig) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { - out := new(ListUptimeCheckIpsResponse) - err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// UptimeCheckServiceServer is the server API for UptimeCheckService service. -type UptimeCheckServiceServer interface { - // Lists the existing valid Uptime check configurations for the project - // (leaving out any invalid configurations). - ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) - // Gets a single Uptime check configuration. - GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) - // Creates a new Uptime check configuration. - CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) - // Updates an Uptime check configuration. You can either replace the entire - // configuration with a new one or replace only certain fields in the current - // configuration by specifying the fields to be updated via `updateMask`. - // Returns the updated configuration. - UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) - // Deletes an Uptime check configuration. Note that this method will fail - // if the Uptime check configuration is referenced by an alert policy or - // other dependent configs that would be rendered invalid by the deletion. - DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) - // Returns the list of IP addresses that checkers run from - ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) -} - -// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedUptimeCheckServiceServer struct { -} - -func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented") -} -func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented") -} -func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented") -} -func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented") -} -func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented") -} -func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented") -} - -func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { - s.RegisterService(&_UptimeCheckService_serviceDesc, srv) -} - -func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListUptimeCheckConfigsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetUptimeCheckConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateUptimeCheckConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateUptimeCheckConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteUptimeCheckConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListUptimeCheckIpsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.monitoring.v3.UptimeCheckService", - HandlerType: (*UptimeCheckServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListUptimeCheckConfigs", - Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, - }, - { - MethodName: "GetUptimeCheckConfig", - Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, - }, - { - MethodName: "CreateUptimeCheckConfig", - Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, - }, - { - MethodName: "UpdateUptimeCheckConfig", - Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, - }, - { - MethodName: "DeleteUptimeCheckConfig", - Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, - }, - { - MethodName: "ListUptimeCheckIps", - Handler: 
_UptimeCheckService_ListUptimeCheckIps_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/monitoring/v3/uptime_service.proto", -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go deleted file mode 100644 index 36a957dd3e..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 -// source: google/type/calendar_period.proto - -package calendarperiod - -import ( - reflect "reflect" - sync "sync" - - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// A `CalendarPeriod` represents the abstract concept of a time period that has -// a canonical start. Grammatically, "the start of the current -// `CalendarPeriod`." All calendar times begin at midnight UTC. -type CalendarPeriod int32 - -const ( - // Undefined period, raises an error. - CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED CalendarPeriod = 0 - // A day. - CalendarPeriod_DAY CalendarPeriod = 1 - // A week. Weeks begin on Monday, following - // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). - CalendarPeriod_WEEK CalendarPeriod = 2 - // A fortnight. The first calendar fortnight of the year begins at the start - // of week 1 according to - // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). - CalendarPeriod_FORTNIGHT CalendarPeriod = 3 - // A month. - CalendarPeriod_MONTH CalendarPeriod = 4 - // A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each - // year. - CalendarPeriod_QUARTER CalendarPeriod = 5 - // A half-year. Half-years start on dates 1-Jan and 1-Jul. - CalendarPeriod_HALF CalendarPeriod = 6 - // A year. - CalendarPeriod_YEAR CalendarPeriod = 7 -) - -// Enum value maps for CalendarPeriod. 
-var ( - CalendarPeriod_name = map[int32]string{ - 0: "CALENDAR_PERIOD_UNSPECIFIED", - 1: "DAY", - 2: "WEEK", - 3: "FORTNIGHT", - 4: "MONTH", - 5: "QUARTER", - 6: "HALF", - 7: "YEAR", - } - CalendarPeriod_value = map[string]int32{ - "CALENDAR_PERIOD_UNSPECIFIED": 0, - "DAY": 1, - "WEEK": 2, - "FORTNIGHT": 3, - "MONTH": 4, - "QUARTER": 5, - "HALF": 6, - "YEAR": 7, - } -) - -func (x CalendarPeriod) Enum() *CalendarPeriod { - p := new(CalendarPeriod) - *p = x - return p -} - -func (x CalendarPeriod) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CalendarPeriod) Descriptor() protoreflect.EnumDescriptor { - return file_google_type_calendar_period_proto_enumTypes[0].Descriptor() -} - -func (CalendarPeriod) Type() protoreflect.EnumType { - return &file_google_type_calendar_period_proto_enumTypes[0] -} - -func (x CalendarPeriod) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CalendarPeriod.Descriptor instead. -func (CalendarPeriod) EnumDescriptor() ([]byte, []int) { - return file_google_type_calendar_period_proto_rawDescGZIP(), []int{0} -} - -var File_google_type_calendar_period_proto protoreflect.FileDescriptor - -var file_google_type_calendar_period_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, - 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2a, 0x7f, 0x0a, 0x0e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x41, 0x4c, 0x45, 0x4e, 0x44, 0x41, 0x52, 0x5f, 0x50, - 0x45, 0x52, 0x49, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x57, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x54, 0x4e, 0x49, - 0x47, 0x48, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x04, - 0x12, 0x0b, 0x0a, 0x07, 0x51, 0x55, 0x41, 0x52, 0x54, 0x45, 0x52, 0x10, 0x05, 0x12, 0x08, 0x0a, - 0x04, 0x48, 0x41, 0x4c, 0x46, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, - 0x07, 0x42, 0x78, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x13, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, - 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x3b, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_calendar_period_proto_rawDescOnce sync.Once - file_google_type_calendar_period_proto_rawDescData = file_google_type_calendar_period_proto_rawDesc -) - -func file_google_type_calendar_period_proto_rawDescGZIP() []byte { - file_google_type_calendar_period_proto_rawDescOnce.Do(func() { - file_google_type_calendar_period_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_type_calendar_period_proto_rawDescData) - }) - return file_google_type_calendar_period_proto_rawDescData -} - -var file_google_type_calendar_period_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_type_calendar_period_proto_goTypes = []interface{}{ - (CalendarPeriod)(0), // 0: google.type.CalendarPeriod -} -var file_google_type_calendar_period_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_type_calendar_period_proto_init() } -func file_google_type_calendar_period_proto_init() { - if File_google_type_calendar_period_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_calendar_period_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_calendar_period_proto_goTypes, - DependencyIndexes: file_google_type_calendar_period_proto_depIdxs, - EnumInfos: file_google_type_calendar_period_proto_enumTypes, - }.Build() - File_google_type_calendar_period_proto = out.File - file_google_type_calendar_period_proto_rawDesc = nil - file_google_type_calendar_period_proto_goTypes = nil - file_google_type_calendar_period_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 788759bde4..ab531f4c0b 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -101,6 +101,9 @@ type SubConn interface { // a new connection will be created. // // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -143,6 +146,13 @@ type ClientConn interface { // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has // changed. 
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index e0d34288cc..c883efa0bb 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -41,7 +42,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]balancer.SubConn), + subConns: make(map[resolver.Address]subConnInfo), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -57,6 +58,11 @@ func (bb *baseBuilder) Name() string { return bb.name } +type subConnInfo struct { + subConn balancer.SubConn + attrs *attributes.Attributes +} + type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -64,7 +70,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn // `attributes` is stripped from the keys of this map (the addresses) + subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -114,7 +120,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { aNoAttrs := a aNoAttrs.Attributes = nil addrsSet[aNoAttrs] = struct{}{} - if sc, ok := b.subConns[aNoAttrs]; !ok { + if scInfo, ok := b.subConns[aNoAttrs]; !ok { // a is a new address (not existing in b.subConns). // // When creating SubConn, the original address with attributes is @@ -125,7 +131,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = sc + b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} b.scStates[sc] = connectivity.Idle sc.Connect() } else { @@ -135,13 +141,15 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // The SubConn does a reflect.DeepEqual of the new and old // addresses. So this is a noop if the current address is the same // as the old one (including attributes). - sc.UpdateAddresses([]resolver.Address{a}) + scInfo.attrs = a.Attributes + b.subConns[aNoAttrs] = scInfo + b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, sc := range b.subConns { + for a, scInfo := range b.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(sc) + b.cc.RemoveSubConn(scInfo.subConn) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -184,9 +192,10 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. 
- for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} + for addr, scInfo := range b.subConns { + if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { + addr.Attributes = scInfo.attrs + readySCs[scInfo.subConn] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go deleted file mode 100644 index c393d7ffd3..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ /dev/null @@ -1,960 +0,0 @@ -// Copyright 2015 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file defines the GRPCLB LoadBalancing protocol. -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/lb/v1/load_balancer.proto - -package grpc_lb_v1 - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - -type LoadBalanceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to LoadBalanceRequestType: - // *LoadBalanceRequest_InitialRequest - // *LoadBalanceRequest_ClientStats - LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` -} - -func (x *LoadBalanceRequest) Reset() { - *x = LoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoadBalanceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadBalanceRequest) ProtoMessage() {} - -func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadBalanceRequest.ProtoReflect.Descriptor instead. -func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{0} -} - -func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { - if m != nil { - return m.LoadBalanceRequestType - } - return nil -} - -func (x *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { - if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { - return x.InitialRequest - } - return nil -} - -func (x *LoadBalanceRequest) GetClientStats() *ClientStats { - if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { - return x.ClientStats - } - return nil -} - -type isLoadBalanceRequest_LoadBalanceRequestType interface { - isLoadBalanceRequest_LoadBalanceRequestType() -} - -type LoadBalanceRequest_InitialRequest struct { - // This message should be sent on the first request to the load balancer. - InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` -} - -type LoadBalanceRequest_ClientStats struct { - // The client stats should be periodically reported to the load balancer - // based on the duration defined in the InitialLoadBalanceResponse. - ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` -} - -func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} - -func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} - -type InitialLoadBalanceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the load balanced service (e.g., service.googleapis.com). Its - // length should be less than 256 bytes. - // The name might include a port number. How to handle the port number is up - // to the balancer. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *InitialLoadBalanceRequest) Reset() { - *x = InitialLoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InitialLoadBalanceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InitialLoadBalanceRequest) ProtoMessage() {} - -func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InitialLoadBalanceRequest.ProtoReflect.Descriptor instead. -func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{1} -} - -func (x *InitialLoadBalanceRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Contains the number of calls finished for a particular load balance token. -type ClientStatsPerToken struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // See Server.load_balance_token. - LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` - // The total number of RPCs that finished associated with the token. - NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` -} - -func (x *ClientStatsPerToken) Reset() { - *x = ClientStatsPerToken{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientStatsPerToken) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientStatsPerToken) ProtoMessage() {} - -func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientStatsPerToken.ProtoReflect.Descriptor instead. -func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{2} -} - -func (x *ClientStatsPerToken) GetLoadBalanceToken() string { - if x != nil { - return x.LoadBalanceToken - } - return "" -} - -func (x *ClientStatsPerToken) GetNumCalls() int64 { - if x != nil { - return x.NumCalls - } - return 0 -} - -// Contains client level statistics that are useful to load balancing. Each -// count except the timestamp should be reset to zero after reporting the stats. -type ClientStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The timestamp of generating the report. - Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The total number of RPCs that started. 
- NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` - // The total number of RPCs that finished. - NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` - // The total number of RPCs that failed to reach a server except dropped RPCs. - NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` - // The total number of RPCs that finished and are known to have been received - // by a server. - NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` - // The list of dropped calls. - CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` -} - -func (x *ClientStats) Reset() { - *x = ClientStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientStats) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientStats) ProtoMessage() {} - -func (x *ClientStats) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientStats.ProtoReflect.Descriptor instead. 
-func (*ClientStats) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{3} -} - -func (x *ClientStats) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *ClientStats) GetNumCallsStarted() int64 { - if x != nil { - return x.NumCallsStarted - } - return 0 -} - -func (x *ClientStats) GetNumCallsFinished() int64 { - if x != nil { - return x.NumCallsFinished - } - return 0 -} - -func (x *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { - if x != nil { - return x.NumCallsFinishedWithClientFailedToSend - } - return 0 -} - -func (x *ClientStats) GetNumCallsFinishedKnownReceived() int64 { - if x != nil { - return x.NumCallsFinishedKnownReceived - } - return 0 -} - -func (x *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { - if x != nil { - return x.CallsFinishedWithDrop - } - return nil -} - -type LoadBalanceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to LoadBalanceResponseType: - // *LoadBalanceResponse_InitialResponse - // *LoadBalanceResponse_ServerList - // *LoadBalanceResponse_FallbackResponse - LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` -} - -func (x *LoadBalanceResponse) Reset() { - *x = LoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoadBalanceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadBalanceResponse) ProtoMessage() {} - -func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadBalanceResponse.ProtoReflect.Descriptor instead. -func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{4} -} - -func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { - if m != nil { - return m.LoadBalanceResponseType - } - return nil -} - -func (x *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { - if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { - return x.InitialResponse - } - return nil -} - -func (x *LoadBalanceResponse) GetServerList() *ServerList { - if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { - return x.ServerList - } - return nil -} - -func (x *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { - if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { - return x.FallbackResponse - } - return nil -} - -type isLoadBalanceResponse_LoadBalanceResponseType interface { - isLoadBalanceResponse_LoadBalanceResponseType() -} - -type LoadBalanceResponse_InitialResponse struct { - // This message should be sent on the first response to the client. 
- InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` -} - -type LoadBalanceResponse_ServerList struct { - // Contains the list of servers selected by the load balancer. The client - // should send requests to these servers in the specified order. - ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` -} - -type LoadBalanceResponse_FallbackResponse struct { - // If this field is set, then the client should eagerly enter fallback - // mode (even if there are existing, healthy connections to backends). - FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` -} - -func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} - -func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} - -func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} - -type FallbackResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *FallbackResponse) Reset() { - *x = FallbackResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FallbackResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FallbackResponse) ProtoMessage() {} - -func (x *FallbackResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FallbackResponse.ProtoReflect.Descriptor instead. -func (*FallbackResponse) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{5} -} - -type InitialLoadBalanceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This interval defines how often the client should send the client stats - // to the load balancer. Stats should only be reported when the duration is - // positive. - ClientStatsReportInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` -} - -func (x *InitialLoadBalanceResponse) Reset() { - *x = InitialLoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InitialLoadBalanceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InitialLoadBalanceResponse) ProtoMessage() {} - -func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InitialLoadBalanceResponse.ProtoReflect.Descriptor instead. 
-func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{6} -} - -func (x *InitialLoadBalanceResponse) GetClientStatsReportInterval() *durationpb.Duration { - if x != nil { - return x.ClientStatsReportInterval - } - return nil -} - -type ServerList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Contains a list of servers selected by the load balancer. The list will - // be updated when server resolutions change or as needed to balance load - // across more servers. The client should consume the server list in order - // unless instructed otherwise via the client_config. - Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` -} - -func (x *ServerList) Reset() { - *x = ServerList{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerList) ProtoMessage() {} - -func (x *ServerList) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerList.ProtoReflect.Descriptor instead. -func (*ServerList) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{7} -} - -func (x *ServerList) GetServers() []*Server { - if x != nil { - return x.Servers - } - return nil -} - -// Contains server information. When the drop field is not true, use the other -// fields. -type Server struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A resolved address for the server, serialized in network-byte-order. It may - // either be an IPv4 or IPv6 address. - IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` - // A resolved port number for the server. - Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - // An opaque but printable token for load reporting. The client must include - // the token of the picked server into the initial metadata when it starts a - // call to that server. The token is used by the server to verify the request - // and to allow the server to report load to the gRPC LB system. The token is - // also used in client stats for reporting dropped calls. - // - // Its length can be variable but must be less than 50 bytes. - LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` - // Indicates whether this particular request should be dropped by the client. - // If the request is dropped, there will be a corresponding entry in - // ClientStats.calls_finished_with_drop. 
- Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` -} - -func (x *Server) Reset() { - *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Server) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Server) ProtoMessage() {} - -func (x *Server) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Server.ProtoReflect.Descriptor instead. -func (*Server) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{8} -} - -func (x *Server) GetIpAddress() []byte { - if x != nil { - return x.IpAddress - } - return nil -} - -func (x *Server) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *Server) GetLoadBalanceToken() string { - if x != nil { - return x.LoadBalanceToken - } - return "" -} - -func (x *Server) GetDrop() bool { - if x != nil { - return x.Drop - } - return false -} - -var File_grpc_lb_v1_load_balancer_proto protoreflect.FileDescriptor - -var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0a, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, - 0x0a, 0x12, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x2f, 0x0a, 0x19, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x60, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x50, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x43, - 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xb0, 0x03, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2a, - 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x61, - 0x6c, 0x6c, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, - 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, - 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x2d, 0x6e, 0x75, 0x6d, 0x5f, - 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, - 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x26, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, - 0x64, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x54, 0x6f, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x65, 0x64, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, - 0x64, 0x12, 0x58, 0x0a, 0x18, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x08, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x15, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x44, 0x72, 0x6f, 0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, - 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x90, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x53, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 
0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, - 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, - 0x4b, 0x0a, 0x11, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x10, 0x66, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1c, 0x0a, 0x1a, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x46, 0x61, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, - 0x0a, 0x1a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x1c, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x40, - 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, - 0x22, 0x83, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x69, - 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, - 0x0a, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x72, 0x6f, 
0x70, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x32, 0x62, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0b, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x57, 0x0a, 0x0d, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x62, - 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_lb_v1_load_balancer_proto_rawDescOnce sync.Once - file_grpc_lb_v1_load_balancer_proto_rawDescData = file_grpc_lb_v1_load_balancer_proto_rawDesc -) - -func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { - file_grpc_lb_v1_load_balancer_proto_rawDescOnce.Do(func() { - file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lb_v1_load_balancer_proto_rawDescData) - }) - return file_grpc_lb_v1_load_balancer_proto_rawDescData -} - -var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ - (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest - (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest - (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken - (*ClientStats)(nil), // 3: grpc.lb.v1.ClientStats - (*LoadBalanceResponse)(nil), // 4: grpc.lb.v1.LoadBalanceResponse - (*FallbackResponse)(nil), // 5: grpc.lb.v1.FallbackResponse - (*InitialLoadBalanceResponse)(nil), // 6: grpc.lb.v1.InitialLoadBalanceResponse - (*ServerList)(nil), // 7: grpc.lb.v1.ServerList - (*Server)(nil), // 8: grpc.lb.v1.Server - (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 10: google.protobuf.Duration -} -var file_grpc_lb_v1_load_balancer_proto_depIdxs = []int32{ - 1, // 0: grpc.lb.v1.LoadBalanceRequest.initial_request:type_name -> grpc.lb.v1.InitialLoadBalanceRequest - 3, // 1: grpc.lb.v1.LoadBalanceRequest.client_stats:type_name -> grpc.lb.v1.ClientStats - 9, // 2: grpc.lb.v1.ClientStats.timestamp:type_name -> google.protobuf.Timestamp - 2, // 3: grpc.lb.v1.ClientStats.calls_finished_with_drop:type_name -> grpc.lb.v1.ClientStatsPerToken - 6, // 4: grpc.lb.v1.LoadBalanceResponse.initial_response:type_name -> grpc.lb.v1.InitialLoadBalanceResponse - 7, // 5: grpc.lb.v1.LoadBalanceResponse.server_list:type_name -> grpc.lb.v1.ServerList - 5, // 6: grpc.lb.v1.LoadBalanceResponse.fallback_response:type_name -> grpc.lb.v1.FallbackResponse - 10, // 7: grpc.lb.v1.InitialLoadBalanceResponse.client_stats_report_interval:type_name -> google.protobuf.Duration - 8, // 8: grpc.lb.v1.ServerList.servers:type_name -> grpc.lb.v1.Server - 0, // 9: 
grpc.lb.v1.LoadBalancer.BalanceLoad:input_type -> grpc.lb.v1.LoadBalanceRequest - 4, // 10: grpc.lb.v1.LoadBalancer.BalanceLoad:output_type -> grpc.lb.v1.LoadBalanceResponse - 10, // [10:11] is the sub-list for method output_type - 9, // [9:10] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_grpc_lb_v1_load_balancer_proto_init() } -func file_grpc_lb_v1_load_balancer_proto_init() { - if File_grpc_lb_v1_load_balancer_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitialLoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientStatsPerToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FallbackResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitialLoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*LoadBalanceRequest_InitialRequest)(nil), - (*LoadBalanceRequest_ClientStats)(nil), - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*LoadBalanceResponse_InitialResponse)(nil), - (*LoadBalanceResponse_ServerList)(nil), - (*LoadBalanceResponse_FallbackResponse)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: 
protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_lb_v1_load_balancer_proto_rawDesc, - NumEnums: 0, - NumMessages: 9, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_lb_v1_load_balancer_proto_goTypes, - DependencyIndexes: file_grpc_lb_v1_load_balancer_proto_depIdxs, - MessageInfos: file_grpc_lb_v1_load_balancer_proto_msgTypes, - }.Build() - File_grpc_lb_v1_load_balancer_proto = out.File - file_grpc_lb_v1_load_balancer_proto_rawDesc = nil - file_grpc_lb_v1_load_balancer_proto_goTypes = nil - file_grpc_lb_v1_load_balancer_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go deleted file mode 100644 index d56b77cca6..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package grpc_lb_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// LoadBalancerClient is the client API for LoadBalancer service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type LoadBalancerClient interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) -} - -type loadBalancerClient struct { - cc grpc.ClientConnInterface -} - -func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { - return &loadBalancerClient{cc} -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) - if err != nil { - return nil, err - } - x := &loadBalancerBalanceLoadClient{stream} - return x, nil -} - -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LoadBalancerServer is the server API for LoadBalancer service. -// All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility -type LoadBalancerServer interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error -} - -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. 
-type UnimplementedLoadBalancerServer struct { -} - -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { - return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") -} - -// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to LoadBalancerServer will -// result in compilation errors. -type UnsafeLoadBalancerServer interface { - mustEmbedUnimplementedLoadBalancerServer() -} - -func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { - s.RegisterService(&LoadBalancer_ServiceDesc, srv) -} - -func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream -} - -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var LoadBalancer_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.lb.v1.LoadBalancer", - HandlerType: (*LoadBalancerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BalanceLoad", - Handler: _LoadBalancer_BalanceLoad_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/lb/v1/load_balancer.proto", -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go deleted file mode 100644 index a43d896411..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ /dev/null @@ -1,486 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclb defines a grpclb balancer. 
-// -// To install grpclb balancer, import this package as: -// import _ "google.golang.org/grpc/balancer/grpclb" -package grpclb - -import ( - "context" - "errors" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/resolver/dns" - "google.golang.org/grpc/resolver" - - durationpb "github.com/golang/protobuf/ptypes/duration" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" -) - -const ( - lbTokenKey = "lb-token" - defaultFallbackTimeout = 10 * time.Second - grpclbName = "grpclb" -) - -var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") -var logger = grpclog.Component("grpclb") - -func convertDuration(d *durationpb.Duration) time.Duration { - if d == nil { - return 0 - } - return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond -} - -// Client API for LoadBalancer service. -// Mostly copied from generated pb.go file. -// To avoid circular dependency. -type loadBalancerClient struct { - cc *grpc.ClientConn -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { - desc := &grpc.StreamDesc{ - StreamName: "BalanceLoad", - ServerStreams: true, - ClientStreams: true, - } - stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) - if err != nil { - return nil, err - } - x := &balanceLoadClientStream{stream} - return x, nil -} - -type balanceLoadClientStream struct { - grpc.ClientStream -} - -func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { - m := new(lbpb.LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func init() { - balancer.Register(newLBBuilder()) - dns.EnableSRVLookups = true -} - -// newLBBuilder creates a builder for grpclb. -func newLBBuilder() balancer.Builder { - return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) -} - -// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given -// fallbackTimeout. If no response is received from the remote balancer within -// fallbackTimeout, the backend addresses from the resolved address list will be -// used. -// -// Only call this function when a non-default fallback timeout is needed. -func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { - return &lbBuilder{ - fallbackTimeout: fallbackTimeout, - } -} - -type lbBuilder struct { - fallbackTimeout time.Duration -} - -func (b *lbBuilder) Name() string { - return grpclbName -} - -func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - // This generates a manual resolver builder with a fixed scheme. This - // scheme will be used to dial to remote LB, so we can send filtered - // address updates to remote LB ClientConn using this manual resolver. 
- r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} - - lb := &lbBalancer{ - cc: newLBCacheClientConn(cc), - target: opt.Target.Endpoint, - opt: opt, - fallbackTimeout: b.fallbackTimeout, - doneCh: make(chan struct{}), - - manualResolver: r, - subConns: make(map[resolver.Address]balancer.SubConn), - scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, - clientStats: newRPCStats(), - backoff: backoff.DefaultExponential, // TODO: make backoff configurable. - } - - var err error - if opt.CredsBundle != nil { - lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) - if err != nil { - logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) - } - lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) - if err != nil { - logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) - } - } - - return lb -} - -type lbBalancer struct { - cc *lbCacheClientConn - target string - opt balancer.BuildOptions - - usePickFirst bool - - // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb - // servers. If it's nil, use the TransportCredentials from BuildOptions - // instead. - grpclbClientConnCreds credentials.Bundle - // grpclbBackendCreds is the creds bundle to be used for addresses that are - // returned by grpclb server. If it's nil, don't set anything when creating - // SubConns. - grpclbBackendCreds credentials.Bundle - - fallbackTimeout time.Duration - doneCh chan struct{} - - // manualResolver is used in the remote LB ClientConn inside grpclb. When - // resolved address updates are received by grpclb, filtered updates will be - // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver - // The ClientConn to talk to the remote balancer. - ccRemoteLB *remoteBalancerCCWrapper - // backoff for calling remote balancer. - backoff backoff.Strategy - - // Support client side load reporting. Each picker gets a reference to this, - // and will update its content. - clientStats *rpcStats - - mu sync.Mutex // guards everything following. - // The full server list including drops, used to check if the newly received - // serverList contains anything new. Each generate picker will also have - // reference to this list to do the first layer pick. - fullServerList []*lbpb.Server - // Backend addresses. It's kept so the addresses are available when - // switching between round_robin and pickfirst. - backendAddrs []resolver.Address - // All backends addresses, with metadata set to nil. This list contains all - // backend addresses in the same order and with the same duplicates as in - // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. - backendAddrsWithoutMetadata []resolver.Address - // Roundrobin functionalities. - state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. - scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. - picker balancer.Picker - // Support fallback to resolved backend addresses if there's no response - // from remote balancer within fallbackTimeout. - remoteBalancerConnected bool - serverListReceived bool - inFallback bool - // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set - // when resolved address updates are received, and read in the goroutine - // handling fallback. 
- resolvedBackendAddrs []resolver.Address -} - -// regeneratePicker takes a snapshot of the balancer, and generates a picker from -// it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. -// Caller must hold lb.mu. -func (lb *lbBalancer) regeneratePicker(resetDrop bool) { - if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: balancer.ErrTransientFailure} - return - } - - if lb.state == connectivity.Connecting { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} - return - } - - var readySCs []balancer.SubConn - if lb.usePickFirst { - for _, sc := range lb.subConns { - readySCs = append(readySCs, sc) - break - } - } else { - for _, a := range lb.backendAddrsWithoutMetadata { - if sc, ok := lb.subConns[a]; ok { - if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { - readySCs = append(readySCs, sc) - } - } - } - } - - if len(readySCs) <= 0 { - // If there's no ready SubConns, always re-pick. This is to avoid drops - // unless at least one SubConn is ready. Otherwise we may drop more - // often than want because of drops + re-picks(which become re-drops). - // - // This doesn't seem to be necessary after the connecting check above. - // Kept for safety. - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} - return - } - if lb.inFallback { - lb.picker = newRRPicker(readySCs) - return - } - if resetDrop { - lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) - return - } - prevLBPicker, ok := lb.picker.(*lbPicker) - if !ok { - lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) - return - } - prevLBPicker.updateReadySCs(readySCs) -} - -// aggregateSubConnStats calculate the aggregated state of SubConns in -// lb.SubConns. These SubConns are subconns in use (when switching between -// fallback and grpclb). lb.scState contains states for all SubConns, including -// those in cache (SubConns are cached for 10 seconds after remove). -// -// The aggregated state is: -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between the overall -// state is reported, and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure. 
-func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { - var numConnecting uint64 - - for _, sc := range lb.subConns { - if state, ok := lb.scStates[sc]; ok { - switch state { - case connectivity.Ready: - return connectivity.Ready - case connectivity.Connecting, connectivity.Idle: - numConnecting++ - } - } - } - if numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} - -func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { - s := scs.ConnectivityState - if logger.V(2) { - logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) - } - lb.mu.Lock() - defer lb.mu.Unlock() - - oldS, ok := lb.scStates[sc] - if !ok { - if logger.V(2) { - logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) - } - return - } - lb.scStates[sc] = s - switch s { - case connectivity.Idle: - sc.Connect() - case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. - delete(lb.scStates, sc) - } - // Force regenerate picker if - // - this sc became ready from not-ready - // - this sc became not-ready from ready - lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) - - // Enter fallback when the aggregated state is not Ready and the connection - // to remote balancer is lost. - if lb.state != connectivity.Ready { - if !lb.inFallback && !lb.remoteBalancerConnected { - // Enter fallback. - lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) - } - } -} - -// updateStateAndPicker re-calculate the aggregated state, and regenerate picker -// if overall state is changed. -// -// If forceRegeneratePicker is true, picker will be regenerated. -func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { - oldAggrState := lb.state - lb.state = lb.aggregateSubConnStates() - // Regenerate picker when one of the following happens: - // - caller wants to regenerate - // - the aggregated state changed - if forceRegeneratePicker || (lb.state != oldAggrState) { - lb.regeneratePicker(resetDrop) - } - - lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) -} - -// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use -// resolved backends (backends received from resolver, not from remote balancer) -// if no connection to remote balancers was successful. -func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { - timer := time.NewTimer(fallbackTimeout) - defer timer.Stop() - select { - case <-timer.C: - case <-lb.doneCh: - return - } - lb.mu.Lock() - if lb.inFallback || lb.serverListReceived { - lb.mu.Unlock() - return - } - // Enter fallback. - lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) - lb.mu.Unlock() -} - -func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { - lb.mu.Lock() - defer lb.mu.Unlock() - - newUsePickFirst := childIsPickFirst(gc) - if lb.usePickFirst == newUsePickFirst { - return - } - if logger.V(2) { - logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) - } - lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) -} - -func (lb *lbBalancer) ResolverError(error) { - // Ignore resolver errors. GRPCLB is not selected unless the resolver - // works at least once. 
-} - -func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if logger.V(2) { - logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) - } - gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) - lb.handleServiceConfig(gc) - - addrs := ccs.ResolverState.Addresses - - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - a.Type = resolver.Backend - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } - if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { - // Override any balancer addresses provided via - // ccs.ResolverState.Addresses. - remoteBalancerAddrs = sd.BalancerAddresses - } - - if len(backendAddrs)+len(remoteBalancerAddrs) == 0 { - // There should be at least one address, either grpclb server or - // fallback. Empty address is not valid. - return balancer.ErrBadResolverState - } - - if len(remoteBalancerAddrs) == 0 { - if lb.ccRemoteLB != nil { - lb.ccRemoteLB.close() - lb.ccRemoteLB = nil - } - } else if lb.ccRemoteLB == nil { - // First time receiving resolved addresses, create a cc to remote - // balancers. - lb.newRemoteBalancerCCWrapper() - // Start the fallback goroutine. - go lb.fallbackToBackendsAfter(lb.fallbackTimeout) - } - - if lb.ccRemoteLB != nil { - // cc to remote balancers uses lb.manualResolver. Send the updated remote - // balancer addresses to it through manualResolver. - lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) - } - - lb.mu.Lock() - lb.resolvedBackendAddrs = backendAddrs - if len(remoteBalancerAddrs) == 0 || lb.inFallback { - // If there's no remote balancer address in ClientConn update, grpclb - // enters fallback mode immediately. - // - // If a new update is received while grpclb is in fallback, update the - // list of backends being used to the new fallback backends. - lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) - } - lb.mu.Unlock() - return nil -} - -func (lb *lbBalancer) Close() { - select { - case <-lb.doneCh: - return - default: - } - close(lb.doneCh) - if lb.ccRemoteLB != nil { - lb.ccRemoteLB.close() - } - lb.cc.close() -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go deleted file mode 100644 index aac3719631..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpclb - -import ( - "encoding/json" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/serviceconfig" -) - -const ( - roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName -) - -type grpclbServiceConfig struct { - serviceconfig.LoadBalancingConfig - ChildPolicy *[]map[string]json.RawMessage -} - -func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - ret := &grpclbServiceConfig{} - if err := json.Unmarshal(lbConfig, ret); err != nil { - return nil, err - } - return ret, nil -} - -func childIsPickFirst(sc *grpclbServiceConfig) bool { - if sc == nil { - return false - } - childConfigs := sc.ChildPolicy - if childConfigs == nil { - return false - } - for _, childC := range *childConfigs { - // If round_robin exists before pick_first, return false - if _, ok := childC[roundRobinName]; ok { - return false - } - // If pick_first is before round_robin, return true - if _, ok := childC[pickFirstName]; ok { - return true - } - } - return false -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go deleted file mode 100644 index 39bc5cc71e..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ /dev/null @@ -1,202 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclb - -import ( - "sync" - "sync/atomic" - - "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/status" -) - -// rpcStats is same as lbpb.ClientStats, except that numCallsDropped is a map -// instead of a slice. -type rpcStats struct { - // Only access the following fields atomically. - numCallsStarted int64 - numCallsFinished int64 - numCallsFinishedWithClientFailedToSend int64 - numCallsFinishedKnownReceived int64 - - mu sync.Mutex - // map load_balance_token -> num_calls_dropped - numCallsDropped map[string]int64 -} - -func newRPCStats() *rpcStats { - return &rpcStats{ - numCallsDropped: make(map[string]int64), - } -} - -func isZeroStats(stats *lbpb.ClientStats) bool { - return len(stats.CallsFinishedWithDrop) == 0 && - stats.NumCallsStarted == 0 && - stats.NumCallsFinished == 0 && - stats.NumCallsFinishedWithClientFailedToSend == 0 && - stats.NumCallsFinishedKnownReceived == 0 -} - -// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. 
-func (s *rpcStats) toClientStats() *lbpb.ClientStats { - stats := &lbpb.ClientStats{ - NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), - NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), - NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), - NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), - } - s.mu.Lock() - dropped := s.numCallsDropped - s.numCallsDropped = make(map[string]int64) - s.mu.Unlock() - for token, count := range dropped { - stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ - LoadBalanceToken: token, - NumCalls: count, - }) - } - return stats -} - -func (s *rpcStats) drop(token string) { - atomic.AddInt64(&s.numCallsStarted, 1) - s.mu.Lock() - s.numCallsDropped[token]++ - s.mu.Unlock() - atomic.AddInt64(&s.numCallsFinished, 1) -} - -func (s *rpcStats) failedToSend() { - atomic.AddInt64(&s.numCallsStarted, 1) - atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) - atomic.AddInt64(&s.numCallsFinished, 1) -} - -func (s *rpcStats) knownReceived() { - atomic.AddInt64(&s.numCallsStarted, 1) - atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) - atomic.AddInt64(&s.numCallsFinished, 1) -} - -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} - -// rrPicker does roundrobin on subConns. It's typically used when there's no -// response from remote balancer, and grpclb falls back to the resolved -// backends. -// -// It guaranteed that len(subConns) > 0. -type rrPicker struct { - mu sync.Mutex - subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. - subConnsNext int -} - -func newRRPicker(readySCs []balancer.SubConn) *rrPicker { - return &rrPicker{ - subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), - } -} - -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - defer p.mu.Unlock() - sc := p.subConns[p.subConnsNext] - p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) - return balancer.PickResult{SubConn: sc}, nil -} - -// lbPicker does two layers of picks: -// -// First layer: roundrobin on all servers in serverList, including drops and backends. -// - If it picks a drop, the RPC will fail as being dropped. -// - If it picks a backend, do a second layer pick to pick the real backend. -// -// Second layer: roundrobin on all READY backends. -// -// It's guaranteed that len(serverList) > 0. -type lbPicker struct { - mu sync.Mutex - serverList []*lbpb.Server - serverListNext int - subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. - subConnsNext int - - stats *rpcStats -} - -func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { - return &lbPicker{ - serverList: serverList, - subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), - stats: stats, - } -} - -func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - defer p.mu.Unlock() - - // Layer one roundrobin on serverList. - s := p.serverList[p.serverListNext] - p.serverListNext = (p.serverListNext + 1) % len(p.serverList) - - // If it's a drop, return an error and fail the RPC. 
- if s.Drop { - p.stats.drop(s.LoadBalanceToken) - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") - } - - // If not a drop but there's no ready subConns. - if len(p.subConns) <= 0 { - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - - // Return the next ready subConn in the list, also collect rpc stats. - sc := p.subConns[p.subConnsNext] - p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) - done := func(info balancer.DoneInfo) { - if !info.BytesSent { - p.stats.failedToSend() - } else if info.BytesReceived { - p.stats.knownReceived() - } - } - return balancer.PickResult{SubConn: sc, Done: done}, nil -} - -func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { - p.mu.Lock() - defer p.mu.Unlock() - - p.subConns = readySCs - p.subConnsNext = p.subConnsNext % len(readySCs) -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go deleted file mode 100644 index 08d326ab40..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ /dev/null @@ -1,410 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclb - -import ( - "context" - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" -) - -// processServerList updates balancer's internal state, create/remove SubConns -// and regenerates picker using the received serverList. -func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if logger.V(2) { - logger.Infof("lbBalancer: processing server list: %+v", l) - } - lb.mu.Lock() - defer lb.mu.Unlock() - - // Set serverListReceived to true so fallback will not take effect if it has - // not hit timeout. - lb.serverListReceived = true - - // If the new server list == old server list, do nothing. - if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if logger.V(2) { - logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") - } - return - } - lb.fullServerList = l.Servers - - var backendAddrs []resolver.Address - for i, s := range l.Servers { - if s.Drop { - continue - } - - md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken) - ip := net.IP(s.IpAddress) - ipStr := ip.String() - if ip.To4() == nil { - // Add square brackets to ipv6 addresses, otherwise net.Dial() and - // net.SplitHostPort() will return too many colons error. 
- ipStr = fmt.Sprintf("[%s]", ipStr) - } - addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) - if logger.V(2) { - logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", - i, ipStr, s.Port, s.LoadBalanceToken) - } - backendAddrs = append(backendAddrs, addr) - } - - // Call refreshSubConns to create/remove SubConns. If we are in fallback, - // this is also exiting fallback. - lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) -} - -// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes -// balancer state and picker. -// -// Caller must hold lb.mu. -func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { - opts := balancer.NewSubConnOptions{} - if !fallback { - opts.CredsBundle = lb.grpclbBackendCreds - } - - lb.backendAddrs = backendAddrs - lb.backendAddrsWithoutMetadata = nil - - fallbackModeChanged := lb.inFallback != fallback - lb.inFallback = fallback - if fallbackModeChanged && lb.inFallback { - // Clear previous received list when entering fallback, so if the server - // comes back and sends the same list again, the new addresses will be - // used. - lb.fullServerList = nil - } - - balancingPolicyChanged := lb.usePickFirst != pickFirst - oldUsePickFirst := lb.usePickFirst - lb.usePickFirst = pickFirst - - if fallbackModeChanged || balancingPolicyChanged { - // Remove all SubConns when switching balancing policy or switching - // fallback mode. - // - // For fallback mode switching with pickfirst, we want to recreate the - // SubConn because the creds could be different. - for a, sc := range lb.subConns { - if oldUsePickFirst { - // If old SubConn were created for pickfirst, bypass cache and - // remove directly. - lb.cc.cc.RemoveSubConn(sc) - } else { - lb.cc.RemoveSubConn(sc) - } - delete(lb.subConns, a) - } - } - - if lb.usePickFirst { - var sc balancer.SubConn - for _, sc = range lb.subConns { - break - } - if sc != nil { - sc.UpdateAddresses(backendAddrs) - sc.Connect() - return - } - // This bypasses the cc wrapper with SubConn cache. - sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) - if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) - return - } - sc.Connect() - lb.subConns[backendAddrs[0]] = sc - lb.scStates[sc] = connectivity.Idle - return - } - - // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick - // lookup for an address. - addrsSet := make(map[resolver.Address]struct{}) - // Create new SubConns. - for _, addr := range backendAddrs { - addrWithoutAttrs := addr - addrWithoutAttrs.Attributes = nil - addrsSet[addrWithoutAttrs] = struct{}{} - lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutAttrs) - - if _, ok := lb.subConns[addrWithoutAttrs]; !ok { - // Use addrWithMD to create the SubConn. - sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) - if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) - continue - } - lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. - if _, ok := lb.scStates[sc]; !ok { - // Only set state of new sc to IDLE. The state could already be - // READY for cached SubConns. - lb.scStates[sc] = connectivity.Idle - } - sc.Connect() - } - } - - for a, sc := range lb.subConns { - // a was removed by resolver. 
- if _, ok := addrsSet[a]; !ok { - lb.cc.RemoveSubConn(sc) - delete(lb.subConns, a) - // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. - } - } - - // Regenerate and update picker after refreshing subconns because with - // cache, even if SubConn was newed/removed, there might be no state - // changes (the subconn will be kept in cache, not actually - // newed/removed). - lb.updateStateAndPicker(true, true) -} - -type remoteBalancerCCWrapper struct { - cc *grpc.ClientConn - lb *lbBalancer - backoff backoff.Strategy - done chan struct{} - - // waitgroup to wait for all goroutines to exit. - wg sync.WaitGroup -} - -func (lb *lbBalancer) newRemoteBalancerCCWrapper() { - var dopts []grpc.DialOption - if creds := lb.opt.DialCreds; creds != nil { - dopts = append(dopts, grpc.WithTransportCredentials(creds)) - } else if bundle := lb.grpclbClientConnCreds; bundle != nil { - dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) - } else { - dopts = append(dopts, grpc.WithInsecure()) - } - if lb.opt.Dialer != nil { - dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) - } - if lb.opt.CustomUserAgent != "" { - dopts = append(dopts, grpc.WithUserAgent(lb.opt.CustomUserAgent)) - } - // Explicitly set pickfirst as the balancer. - dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) - dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } - - // Enable Keepalive for grpclb client. - dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 20 * time.Second, - Timeout: 10 * time.Second, - PermitWithoutStream: true, - })) - - // The dial target is not important. - // - // The grpclb server addresses will set field ServerName, and creds will - // receive ServerName as authority. - cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) - if err != nil { - logger.Fatalf("failed to dial: %v", err) - } - ccw := &remoteBalancerCCWrapper{ - cc: cc, - lb: lb, - backoff: lb.backoff, - done: make(chan struct{}), - } - lb.ccRemoteLB = ccw - ccw.wg.Add(1) - go ccw.watchRemoteBalancer() -} - -// close closed the ClientConn to remote balancer, and waits until all -// goroutines to finish. -func (ccw *remoteBalancerCCWrapper) close() { - close(ccw.done) - ccw.cc.Close() - ccw.wg.Wait() -} - -func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error { - for { - reply, err := s.Recv() - if err != nil { - if err == io.EOF { - return errServerTerminatedConnection - } - return fmt.Errorf("grpclb: failed to recv server list: %v", err) - } - if serverList := reply.GetServerList(); serverList != nil { - ccw.lb.processServerList(serverList) - } - if reply.GetFallbackResponse() != nil { - // Eagerly enter fallback - ccw.lb.mu.Lock() - ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) - ccw.lb.mu.Unlock() - } - } -} - -func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - lastZero := false - for { - select { - case <-ticker.C: - case <-s.Context().Done(): - return - } - stats := ccw.lb.clientStats.toClientStats() - zero := isZeroStats(stats) - if zero && lastZero { - // Quash redundant empty load reports. 
- continue - lastZero = zero - t := time.Now() - stats.Timestamp = &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := s.Send(&lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ - ClientStats: stats, - }, - }); err != nil { - return - } - } -} - -func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) { - lbClient := &loadBalancerClient{cc: ccw.cc} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) - if err != nil { - return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) - } - ccw.lb.mu.Lock() - ccw.lb.remoteBalancerConnected = true - ccw.lb.mu.Unlock() - - // grpclb handshake on the stream. - initReq := &lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbpb.InitialLoadBalanceRequest{ - Name: ccw.lb.target, - }, - }, - } - if err := stream.Send(initReq); err != nil { - return true, fmt.Errorf("grpclb: failed to send init request: %v", err) - } - reply, err := stream.Recv() - if err != nil { - return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) - } - initResp := reply.GetInitialResponse() - if initResp == nil { - return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") - } - - ccw.wg.Add(1) - go func() { - defer ccw.wg.Done() - if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { - ccw.sendLoadReport(stream, d) - } - }() - // No backoff if init req/resp handshake was successful. - return false, ccw.readServerList(stream) -} - -func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { - defer ccw.wg.Done() - var retryCount int - for { - doBackoff, err := ccw.callRemoteBalancer() - select { - case <-ccw.done: - return - default: - if err != nil { - if err == errServerTerminatedConnection { - logger.Info(err) - } else { - logger.Warning(err) - } - } - } - // Trigger a re-resolve when the stream errors. - ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) - - ccw.lb.mu.Lock() - ccw.lb.remoteBalancerConnected = false - ccw.lb.fullServerList = nil - // Enter fallback when connection to remote balancer is lost, and the - // aggregated state is not Ready. - if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { - // Entering fallback. - ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) - } - ccw.lb.mu.Unlock() - - if !doBackoff { - retryCount = 0 - continue - } - - timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff - select { - case <-timer.C: - case <-ccw.done: - timer.Stop() - return - } - retryCount++ - } -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go deleted file mode 100644 index 373f04b98d..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ /dev/null @@ -1,208 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclb - -import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/resolver" -) - -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). -// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. -type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// UpdateState calls cc.UpdateState. -func (r *lbManualResolver) UpdateState(s resolver.State) { - r.ccr.UpdateState(s) -} - -const subConnCacheTime = time.Second * 10 - -// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. -// -// Its new and remove methods are updated to do cache first. -type lbCacheClientConn struct { - cc balancer.ClientConn - timeout time.Duration - - mu sync.Mutex - // subConnCache only keeps subConns that are being deleted. 
- subConnCache map[resolver.Address]*subConnCacheEntry - subConnToAddr map[balancer.SubConn]resolver.Address -} - -type subConnCacheEntry struct { - sc balancer.SubConn - - cancel func() - abortDeleting bool -} - -func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { - return &lbCacheClientConn{ - cc: cc, - timeout: subConnCacheTime, - subConnCache: make(map[resolver.Address]*subConnCacheEntry), - subConnToAddr: make(map[balancer.SubConn]resolver.Address), - } -} - -func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) != 1 { - return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) - } - addrWithoutAttrs := addrs[0] - addrWithoutAttrs.Attributes = nil - - ccc.mu.Lock() - defer ccc.mu.Unlock() - if entry, ok := ccc.subConnCache[addrWithoutAttrs]; ok { - // If entry is in subConnCache, the SubConn was being deleted. - // cancel function will never be nil. - entry.cancel() - delete(ccc.subConnCache, addrWithoutAttrs) - return entry.sc, nil - } - - scNew, err := ccc.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - - ccc.subConnToAddr[scNew] = addrWithoutAttrs - return scNew, nil -} - -func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { - ccc.mu.Lock() - defer ccc.mu.Unlock() - addr, ok := ccc.subConnToAddr[sc] - if !ok { - return - } - - if entry, ok := ccc.subConnCache[addr]; ok { - if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. - delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) - } - return - } - - entry := &subConnCacheEntry{ - sc: sc, - } - ccc.subConnCache[addr] = entry - - timer := time.AfterFunc(ccc.timeout, func() { - ccc.mu.Lock() - defer ccc.mu.Unlock() - if entry.abortDeleting { - return - } - ccc.cc.RemoveSubConn(sc) - delete(ccc.subConnToAddr, sc) - delete(ccc.subConnCache, addr) - }) - entry.cancel = func() { - if !timer.Stop() { - // If stop was not successful, the timer has fired (this can only - // happen in a race). But the deleting function is blocked on ccc.mu - // because the mutex was held by the caller of this function. - // - // Set abortDeleting to true to abort the deleting function. When - // the lock is released, the deleting function will acquire the - // lock, check the value of abortDeleting and return. - entry.abortDeleting = true - } - } -} - -func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { - ccc.cc.UpdateState(s) -} - -func (ccc *lbCacheClientConn) close() { - ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. 
- for _, entry := range ccc.subConnCache { - entry.cancel() - } - ccc.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 11e592aabb..4cc7f9159b 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -163,6 +163,14 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() @@ -197,7 +205,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.mu.Lock() defer acbw.mu.Unlock() if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) return } if !acbw.ac.tryUpdateAddrs(addrs) { @@ -212,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.ac.acbw = nil acbw.ac.mu.Unlock() acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) if acState == connectivity.Shutdown { return diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 77a08fd33b..24109264f5 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -143,6 +143,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * firstResolveEvent: grpcsync.NewEvent(), } cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := range opts { @@ -1197,7 +1198,7 @@ func (ac *addrConn) resetTransport() { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - newTr.Close() + newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) return } ac.curAddr = addr @@ -1329,7 +1330,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne select { case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. - newTr.Close() + newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: @@ -1446,10 +1447,9 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. 
func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go deleted file mode 100644 index 729c4b43b5..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/alts.go +++ /dev/null @@ -1,331 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package alts implements the ALTS credential support by gRPC library, which -// encapsulates all the state needed by a client to authenticate with a server -// using ALTS and make various assertions, e.g., about the client's identity, -// role, or whether it is authorized to make a particular call. -// This package is experimental. -package alts - -import ( - "context" - "errors" - "fmt" - "net" - "sync" - "time" - - "google.golang.org/grpc/credentials" - core "google.golang.org/grpc/credentials/alts/internal" - "google.golang.org/grpc/credentials/alts/internal/handshaker" - "google.golang.org/grpc/credentials/alts/internal/handshaker/service" - altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" - "google.golang.org/grpc/grpclog" -) - -const ( - // hypervisorHandshakerServiceAddress represents the default ALTS gRPC - // handshaker service address in the hypervisor. - hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080" - // defaultTimeout specifies the server handshake timeout. - defaultTimeout = 30.0 * time.Second - // The following constants specify the minimum and maximum acceptable - // protocol versions. - protocolVersionMaxMajor = 2 - protocolVersionMaxMinor = 1 - protocolVersionMinMajor = 2 - protocolVersionMinMinor = 1 -) - -var ( - once sync.Once - maxRPCVersion = &altspb.RpcProtocolVersions_Version{ - Major: protocolVersionMaxMajor, - Minor: protocolVersionMaxMinor, - } - minRPCVersion = &altspb.RpcProtocolVersions_Version{ - Major: protocolVersionMinMajor, - Minor: protocolVersionMinMinor, - } - // ErrUntrustedPlatform is returned from ClientHandshake and - // ServerHandshake is running on a platform where the trustworthiness of - // the handshaker service is not guaranteed. - ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") - logger = grpclog.Component("alts") -) - -// AuthInfo exposes security information from the ALTS handshake to the -// application. This interface is to be implemented by ALTS. Users should not -// need a brand new implementation of this interface. For situations like -// testing, any new implementation should embed this interface. This allows -// ALTS to add new methods to this interface. -type AuthInfo interface { - // ApplicationProtocol returns application protocol negotiated for the - // ALTS connection. - ApplicationProtocol() string - // RecordProtocol returns the record protocol negotiated for the ALTS - // connection. 
- RecordProtocol() string - // SecurityLevel returns the security level of the created ALTS secure - // channel. - SecurityLevel() altspb.SecurityLevel - // PeerServiceAccount returns the peer service account. - PeerServiceAccount() string - // LocalServiceAccount returns the local service account. - LocalServiceAccount() string - // PeerRPCVersions returns the RPC version supported by the peer. - PeerRPCVersions() *altspb.RpcProtocolVersions -} - -// ClientOptions contains the client-side options of an ALTS channel. These -// options will be passed to the underlying ALTS handshaker. -type ClientOptions struct { - // TargetServiceAccounts contains a list of expected target service - // accounts. - TargetServiceAccounts []string - // HandshakerServiceAddress represents the ALTS handshaker gRPC service - // address to connect to. - HandshakerServiceAddress string -} - -// DefaultClientOptions creates a new ClientOptions object with the default -// values. -func DefaultClientOptions() *ClientOptions { - return &ClientOptions{ - HandshakerServiceAddress: hypervisorHandshakerServiceAddress, - } -} - -// ServerOptions contains the server-side options of an ALTS channel. These -// options will be passed to the underlying ALTS handshaker. -type ServerOptions struct { - // HandshakerServiceAddress represents the ALTS handshaker gRPC service - // address to connect to. - HandshakerServiceAddress string -} - -// DefaultServerOptions creates a new ServerOptions object with the default -// values. -func DefaultServerOptions() *ServerOptions { - return &ServerOptions{ - HandshakerServiceAddress: hypervisorHandshakerServiceAddress, - } -} - -// altsTC is the credentials required for authenticating a connection using ALTS. -// It implements credentials.TransportCredentials interface. -type altsTC struct { - info *credentials.ProtocolInfo - side core.Side - accounts []string - hsAddress string -} - -// NewClientCreds constructs a client-side ALTS TransportCredentials object. -func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { - return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) -} - -// NewServerCreds constructs a server-side ALTS TransportCredentials object. -func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { - return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) -} - -func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { - once.Do(func() { - vmOnGCP = isRunningOnGCP() - }) - - if hsAddress == "" { - hsAddress = hypervisorHandshakerServiceAddress - } - return &altsTC{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: "alts", - SecurityVersion: "1.0", - }, - side: side, - accounts: accounts, - hsAddress: hsAddress, - } -} - -// ClientHandshake implements the client side handshake protocol. -func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { - if !vmOnGCP { - return nil, nil, ErrUntrustedPlatform - } - - // Connecting to ALTS handshaker service. - hsConn, err := service.Dial(g.hsAddress) - if err != nil { - return nil, nil, err - } - // Do not close hsConn since it is shared with other handshakes. - - // Possible context leak: - // The cancel function for the child context we create will only be - // called a non-nil error is returned. 
- var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer func() { - if err != nil { - cancel() - } - }() - - opts := handshaker.DefaultClientHandshakerOptions() - opts.TargetName = addr - opts.TargetServiceAccounts = g.accounts - opts.RPCVersions = &altspb.RpcProtocolVersions{ - MaxRpcVersion: maxRPCVersion, - MinRpcVersion: minRPCVersion, - } - chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts) - if err != nil { - return nil, nil, err - } - defer func() { - if err != nil { - chs.Close() - } - }() - secConn, authInfo, err := chs.ClientHandshake(ctx) - if err != nil { - return nil, nil, err - } - altsAuthInfo, ok := authInfo.(AuthInfo) - if !ok { - return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo") - } - match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - if !match { - return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - } - return secConn, authInfo, nil -} - -// ServerHandshake implements the server side ALTS handshaker. -func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { - if !vmOnGCP { - return nil, nil, ErrUntrustedPlatform - } - // Connecting to ALTS handshaker service. - hsConn, err := service.Dial(g.hsAddress) - if err != nil { - return nil, nil, err - } - // Do not close hsConn since it's shared with other handshakes. - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - opts := handshaker.DefaultServerHandshakerOptions() - opts.RPCVersions = &altspb.RpcProtocolVersions{ - MaxRpcVersion: maxRPCVersion, - MinRpcVersion: minRPCVersion, - } - shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts) - if err != nil { - return nil, nil, err - } - defer func() { - if err != nil { - shs.Close() - } - }() - secConn, authInfo, err := shs.ServerHandshake(ctx) - if err != nil { - return nil, nil, err - } - altsAuthInfo, ok := authInfo.(AuthInfo) - if !ok { - return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo") - } - match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - if !match { - return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - } - return secConn, authInfo, nil -} - -func (g *altsTC) Info() credentials.ProtocolInfo { - return *g.info -} - -func (g *altsTC) Clone() credentials.TransportCredentials { - info := *g.info - var accounts []string - if g.accounts != nil { - accounts = make([]string, len(g.accounts)) - copy(accounts, g.accounts) - } - return &altsTC{ - info: &info, - side: g.side, - hsAddress: g.hsAddress, - accounts: accounts, - } -} - -func (g *altsTC) OverrideServerName(serverNameOverride string) error { - g.info.ServerName = serverNameOverride - return nil -} - -// compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2. 
-func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { - switch { - case v1.GetMajor() > v2.GetMajor(), - v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor(): - return 1 - case v1.GetMajor() < v2.GetMajor(), - v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor(): - return -1 - } - return 0 -} - -// checkRPCVersions performs a version check between local and peer rpc protocol -// versions. This function returns true if the check passes which means both -// parties agreed on a common rpc protocol to use, and false otherwise. The -// function also returns the highest common RPC protocol version both parties -// agreed on. -func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { - if local == nil || peer == nil { - logger.Error("invalid checkRPCVersions argument, either local or peer is nil.") - return false, nil - } - - // maxCommonVersion is MIN(local.max, peer.max). - maxCommonVersion := local.GetMaxRpcVersion() - if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 { - maxCommonVersion = peer.GetMaxRpcVersion() - } - - // minCommonVersion is MAX(local.min, peer.min). - minCommonVersion := peer.GetMinRpcVersion() - if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 { - minCommonVersion = local.GetMinRpcVersion() - } - - if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 { - return false, nil - } - return true, maxCommonVersion -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go deleted file mode 100644 index ebea57da1d..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package authinfo provide authentication information returned by handshakers. -package authinfo - -import ( - "google.golang.org/grpc/credentials" - altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" -) - -var _ credentials.AuthInfo = (*altsAuthInfo)(nil) - -// altsAuthInfo exposes security information from the ALTS handshake to the -// application. altsAuthInfo is immutable and implements credentials.AuthInfo. -type altsAuthInfo struct { - p *altspb.AltsContext - credentials.CommonAuthInfo -} - -// New returns a new altsAuthInfo object given handshaker results. -func New(result *altspb.HandshakerResult) credentials.AuthInfo { - return newAuthInfo(result) -} - -func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { - return &altsAuthInfo{ - p: &altspb.AltsContext{ - ApplicationProtocol: result.GetApplicationProtocol(), - RecordProtocol: result.GetRecordProtocol(), - // TODO: assign security level from result. 
- SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, - PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), - LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), - PeerRpcVersions: result.GetPeerRpcVersions(), - PeerAttributes: result.GetPeerIdentity().GetAttributes(), - }, - CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, - } -} - -// AuthType identifies the context as providing ALTS authentication information. -func (s *altsAuthInfo) AuthType() string { - return "alts" -} - -// ApplicationProtocol returns the context's application protocol. -func (s *altsAuthInfo) ApplicationProtocol() string { - return s.p.GetApplicationProtocol() -} - -// RecordProtocol returns the context's record protocol. -func (s *altsAuthInfo) RecordProtocol() string { - return s.p.GetRecordProtocol() -} - -// SecurityLevel returns the context's security level. -func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { - return s.p.GetSecurityLevel() -} - -// PeerServiceAccount returns the context's peer service account. -func (s *altsAuthInfo) PeerServiceAccount() string { - return s.p.GetPeerServiceAccount() -} - -// LocalServiceAccount returns the context's local service account. -func (s *altsAuthInfo) LocalServiceAccount() string { - return s.p.GetLocalServiceAccount() -} - -// PeerRPCVersions returns the context's peer RPC versions. -func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { - return s.p.GetPeerRpcVersions() -} - -// PeerAttributes returns the context's peer attributes. -func (s *altsAuthInfo) PeerAttributes() map[string]string { - return s.p.GetPeerAttributes() -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go deleted file mode 100644 index 3896e8cf2b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains common core functionality for ALTS. -package internal - -import ( - "context" - "net" - - "google.golang.org/grpc/credentials" -) - -const ( - // ClientSide identifies the client in this communication. - ClientSide Side = iota - // ServerSide identifies the server in this communication. - ServerSide -) - -// PeerNotRespondingError is returned when a peer server is not responding -// after a channel has been established. It is treated as a temporary connection -// error and re-connection to the server should be attempted. -var PeerNotRespondingError = &peerNotRespondingError{} - -// Side identifies the party's role: client or server. -type Side int - -type peerNotRespondingError struct{} - -// Return an error message for the purpose of logging. -func (e *peerNotRespondingError) Error() string { - return "peer server is not responding and re-connection should be attempted." 
-} - -// Temporary indicates if this connection error is temporary or fatal. -func (e *peerNotRespondingError) Temporary() bool { - return true -} - -// Handshaker defines a ALTS handshaker interface. -type Handshaker interface { - // ClientHandshake starts and completes a client-side handshaking and - // returns a secure connection and corresponding auth information. - ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // ServerHandshake starts and completes a server-side handshaking and - // returns a secure connection and corresponding auth information. - ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // Close terminates the Handshaker. It should be called when the caller - // obtains the secure connection. - Close() -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go deleted file mode 100644 index 43726e877b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "encoding/binary" - "fmt" - "strconv" -) - -// rekeyAEAD holds the necessary information for an AEAD based on -// AES-GCM that performs nonce-based key derivation and XORs the -// nonce with a random mask. -type rekeyAEAD struct { - kdfKey []byte - kdfCounter []byte - nonceMask []byte - nonceBuf []byte - gcmAEAD cipher.AEAD -} - -// KeySizeError signals that the given key does not have the correct size. -type KeySizeError int - -func (k KeySizeError) Error() string { - return "alts/conn: invalid key size " + strconv.Itoa(int(k)) -} - -// newRekeyAEAD creates a new instance of aes128gcm with rekeying. -// The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for -// the counter. -func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { - k := len(key) - if k != kdfKeyLen+nonceLen { - return nil, KeySizeError(k) - } - return &rekeyAEAD{ - kdfKey: key[:kdfKeyLen], - kdfCounter: make([]byte, kdfCounterLen), - nonceMask: key[kdfKeyLen:], - nonceBuf: make([]byte, nonceLen), - gcmAEAD: nil, - }, nil -} - -// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce, -// and calls Seal for aes128gcm. -func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if err := s.rekeyIfRequired(nonce); err != nil { - panic(fmt.Sprintf("Rekeying failed with: %s", err.Error())) - } - maskNonce(s.nonceBuf, nonce, s.nonceMask) - return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData) -} - -// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce, -// and calls Open for aes128gcm. 
-func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if err := s.rekeyIfRequired(nonce); err != nil { - return nil, err - } - maskNonce(s.nonceBuf, nonce, s.nonceMask) - return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) -} - -// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil -// or cannot be used with given nonce. -func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { - newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] - if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { - return nil - } - copy(s.kdfCounter, newKdfCounter) - a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) - if err != nil { - return err - } - s.gcmAEAD, err = cipher.NewGCM(a) - return err -} - -// maskNonce XORs the given nonce with the mask and stores the result in dst. -func maskNonce(dst, nonce, mask []byte) { - nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) - nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) - mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) - mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) - binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) - binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) -} - -// NonceSize returns the required nonce size. -func (s *rekeyAEAD) NonceSize() int { - return s.gcmAEAD.NonceSize() -} - -// Overhead returns the ciphertext overhead. -func (s *rekeyAEAD) Overhead() int { - return s.gcmAEAD.Overhead() -} - -// hkdfExpand computes the first 16 bytes of the HKDF-expand function -// defined in RFC5869. -func hkdfExpand(key, info []byte) []byte { - mac := hmac.New(sha256.New, key) - mac.Write(info) - mac.Write([]byte{0x01}[:]) - return mac.Sum(nil)[:aeadKeyLen] -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go deleted file mode 100644 index 04e0adb6c9..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "crypto/aes" - "crypto/cipher" - - core "google.golang.org/grpc/credentials/alts/internal" -) - -const ( - // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in - // each direction). - overflowLenAES128GCM = 5 -) - -// aes128gcm is the struct that holds necessary information for ALTS record. -// The counter value is NOT included in the payload during the encryption and -// decryption operations. -type aes128gcm struct { - // inCounter is used in ALTS record to check that incoming counters are - // as expected, since ALTS record guarantees that messages are unwrapped - // in the same order that the peer wrapped them. - inCounter Counter - outCounter Counter - aead cipher.AEAD -} - -// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. 
-func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - a, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - return &aes128gcm{ - inCounter: NewInCounter(side, overflowLenAES128GCM), - outCounter: NewOutCounter(side, overflowLenAES128GCM), - aead: a, - }, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext do not -// overlap. -func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { - // If we need to allocate an output buffer, we want to include space for - // GCM tag to avoid forcing ALTS record to reallocate as well. - dlen := len(dst) - dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) - seq, err := s.outCounter.Value() - if err != nil { - return nil, err - } - data := out[:len(plaintext)] - copy(data, plaintext) // data may alias plaintext - - // Seal appends the ciphertext and the tag to its first argument and - // returns the updated slice. However, SliceForAppend above ensures that - // dst has enough capacity to avoid a reallocation and copy due to the - // append. - dst = s.aead.Seal(dst[:dlen], seq, data, nil) - s.outCounter.Inc() - return dst, nil -} - -func (s *aes128gcm) EncryptionOverhead() int { - return GcmTagSize -} - -func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { - seq, err := s.inCounter.Value() - if err != nil { - return nil, err - } - // If dst is equal to ciphertext[:0], ciphertext storage is reused. - plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) - if err != nil { - return nil, ErrAuth - } - s.inCounter.Inc() - return plaintext, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go deleted file mode 100644 index 6a9035ea25..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "crypto/cipher" - - core "google.golang.org/grpc/credentials/alts/internal" -) - -const ( - // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in - // each direction). - overflowLenAES128GCMRekey = 8 - nonceLen = 12 - aeadKeyLen = 16 - kdfKeyLen = 32 - kdfCounterOffset = 2 - kdfCounterLen = 6 - sizeUint64 = 8 -) - -// aes128gcmRekey is the struct that holds necessary information for ALTS record. -// The counter value is NOT included in the payload during the encryption and -// decryption operations. 
-type aes128gcmRekey struct { - // inCounter is used in ALTS record to check that incoming counters are - // as expected, since ALTS record guarantees that messages are unwrapped - // in the same order that the peer wrapped them. - inCounter Counter - outCounter Counter - inAEAD cipher.AEAD - outAEAD cipher.AEAD -} - -// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying -// for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used -// as a random mask for the counter. -func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { - inCounter := NewInCounter(side, overflowLenAES128GCMRekey) - outCounter := NewOutCounter(side, overflowLenAES128GCMRekey) - inAEAD, err := newRekeyAEAD(key) - if err != nil { - return nil, err - } - outAEAD, err := newRekeyAEAD(key) - if err != nil { - return nil, err - } - return &aes128gcmRekey{ - inCounter, - outCounter, - inAEAD, - outAEAD, - }, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext do not -// overlap. -func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) { - // If we need to allocate an output buffer, we want to include space for - // GCM tag to avoid forcing ALTS record to reallocate as well. - dlen := len(dst) - dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) - seq, err := s.outCounter.Value() - if err != nil { - return nil, err - } - data := out[:len(plaintext)] - copy(data, plaintext) // data may alias plaintext - - // Seal appends the ciphertext and the tag to its first argument and - // returns the updated slice. However, SliceForAppend above ensures that - // dst has enough capacity to avoid a reallocation and copy due to the - // append. - dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil) - s.outCounter.Inc() - return dst, nil -} - -func (s *aes128gcmRekey) EncryptionOverhead() int { - return GcmTagSize -} - -func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) { - seq, err := s.inCounter.Value() - if err != nil { - return nil, err - } - plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil) - if err != nil { - return nil, ErrAuth - } - s.inCounter.Inc() - return plaintext, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go deleted file mode 100644 index 1795d0c9e3..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package conn - -import ( - "encoding/binary" - "errors" - "fmt" -) - -const ( - // GcmTagSize is the GCM tag size is the difference in length between - // plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto - // library. - GcmTagSize = 16 -) - -// ErrAuth occurs on authentication failure. -var ErrAuth = errors.New("message authentication failed") - -// SliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func SliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return head, tail -} - -// ParseFramedMsg parse the provided buffer and returns a frame of the format -// msgLength+msg and any remaining bytes in that buffer. -func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) { - // If the size field is not complete, return the provided buffer as - // remaining buffer. - if len(b) < MsgLenFieldSize { - return nil, b, nil - } - msgLenField := b[:MsgLenFieldSize] - length := binary.LittleEndian.Uint32(msgLenField) - if length > maxLen { - return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) - } - if len(b) < int(length)+4 { // account for the first 4 msg length bytes. - // Frame is not complete yet. - return nil, b, nil - } - return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go deleted file mode 100644 index 9f00aca0b6..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "errors" -) - -const counterLen = 12 - -var ( - errInvalidCounter = errors.New("invalid counter") -) - -// Counter is a 96-bit, little-endian counter. -type Counter struct { - value [counterLen]byte - invalid bool - overflowLen int -} - -// Value returns the current value of the counter as a byte slice. -func (c *Counter) Value() ([]byte, error) { - if c.invalid { - return nil, errInvalidCounter - } - return c.value[:], nil -} - -// Inc increments the counter and checks for overflow. -func (c *Counter) Inc() { - // If the counter is already invalid, there is no need to increase it. 
- if c.invalid { - return - } - i := 0 - for ; i < c.overflowLen; i++ { - c.value[i]++ - if c.value[i] != 0 { - break - } - } - if i == c.overflowLen { - c.invalid = true - } -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go deleted file mode 100644 index 0d64fb37a1..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package conn contains an implementation of a secure channel created by gRPC -// handshakers. -package conn - -import ( - "encoding/binary" - "fmt" - "math" - "net" - - core "google.golang.org/grpc/credentials/alts/internal" -) - -// ALTSRecordCrypto is the interface for gRPC ALTS record protocol. -type ALTSRecordCrypto interface { - // Encrypt encrypts the plaintext and computes the tag (if any) of dst - // and plaintext. dst and plaintext may fully overlap or not at all. - Encrypt(dst, plaintext []byte) ([]byte, error) - // EncryptionOverhead returns the tag size (if any) in bytes. - EncryptionOverhead() int - // Decrypt decrypts ciphertext and verify the tag (if any). dst and - // ciphertext may alias exactly or not at all. To reuse ciphertext's - // storage for the decrypted output, use ciphertext[:0] as dst. - Decrypt(dst, ciphertext []byte) ([]byte, error) -} - -// ALTSRecordFunc is a function type for factory functions that create -// ALTSRecordCrypto instances. -type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) - -const ( - // MsgLenFieldSize is the byte size of the frame length field of a - // framed message. - MsgLenFieldSize = 4 - // The byte size of the message type field of a framed message. - msgTypeFieldSize = 4 - // The bytes size limit for a ALTS record message. - altsRecordLengthLimit = 1024 * 1024 // 1 MiB - // The default bytes size of a ALTS record message. - altsRecordDefaultLength = 4 * 1024 // 4KiB - // Message type value included in ALTS record framing. - altsRecordMsgType = uint32(0x06) - // The initial write buffer size. - altsWriteBufferInitialSize = 32 * 1024 // 32KiB - // The maximum write buffer size. This *must* be multiple of - // altsRecordDefaultLength. - altsWriteBufferMaxSize = 512 * 1024 // 512KiB -) - -var ( - protocols = make(map[string]ALTSRecordFunc) -) - -// RegisterProtocol register a ALTS record encryption protocol. -func RegisterProtocol(protocol string, f ALTSRecordFunc) error { - if _, ok := protocols[protocol]; ok { - return fmt.Errorf("protocol %v is already registered", protocol) - } - protocols[protocol] = f - return nil -} - -// conn represents a secured connection. It implements the net.Conn interface. -type conn struct { - net.Conn - crypto ALTSRecordCrypto - // buf holds data that has been read from the connection and decrypted, - // but has not yet been returned by Read. 
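// Illustrative sketch (not part of the vendored gRPC code above): the Counter
// type above is a little-endian counter that becomes permanently invalid once
// the low overflowLen bytes wrap around. demoCounter is a stripped-down model
// of Inc under hypothetical names.
package main

import "fmt"

type demoCounter struct {
	value       [12]byte
	overflowLen int
	invalid     bool
}

func (c *demoCounter) inc() {
	if c.invalid {
		return
	}
	i := 0
	for ; i < c.overflowLen; i++ {
		c.value[i]++
		if c.value[i] != 0 {
			break // no carry needed, done
		}
	}
	if i == c.overflowLen {
		c.invalid = true // every overflow byte wrapped to zero
	}
}

func main() {
	c := demoCounter{overflowLen: 1} // tiny overflow window to show wrap-around
	for i := 0; i < 256; i++ {
		c.inc()
	}
	fmt.Println(c.invalid) // true: the single overflow byte wrapped
}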
- buf []byte - payloadLengthLimit int - // protected holds data read from the network but have not yet been - // decrypted. This data might not compose a complete frame. - protected []byte - // writeBuf is a buffer used to contain encrypted frames before being - // written to the network. - writeBuf []byte - // nextFrame stores the next frame (in protected buffer) info. - nextFrame []byte - // overhead is the calculated overhead of each frame. - overhead int -} - -// NewConn creates a new secure channel instance given the other party role and -// handshaking result. -func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) { - newCrypto := protocols[recordProtocol] - if newCrypto == nil { - return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol) - } - crypto, err := newCrypto(side, key) - if err != nil { - return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err) - } - overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() - payloadLengthLimit := altsRecordDefaultLength - overhead - var protectedBuf []byte - if protected == nil { - // We pre-allocate protected to be of size - // 2*altsRecordDefaultLength-1 during initialization. We only - // read from the network into protected when protected does not - // contain a complete frame, which is at most - // altsRecordDefaultLength-1 (bytes). And we read at most - // altsRecordDefaultLength (bytes) data into protected at one - // time. Therefore, 2*altsRecordDefaultLength-1 is large enough - // to buffer data read from the network. - protectedBuf = make([]byte, 0, 2*altsRecordDefaultLength-1) - } else { - protectedBuf = make([]byte, len(protected)) - copy(protectedBuf, protected) - } - - altsConn := &conn{ - Conn: c, - crypto: crypto, - payloadLengthLimit: payloadLengthLimit, - protected: protectedBuf, - writeBuf: make([]byte, altsWriteBufferInitialSize), - nextFrame: protectedBuf, - overhead: overhead, - } - return altsConn, nil -} - -// Read reads and decrypts a frame from the underlying connection, and copies the -// decrypted payload into b. If the size of the payload is greater than len(b), -// Read retains the remaining bytes in an internal buffer, and subsequent calls -// to Read will read from this buffer until it is exhausted. -func (p *conn) Read(b []byte) (n int, err error) { - if len(p.buf) == 0 { - var framedMsg []byte - framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit) - if err != nil { - return n, err - } - // Check whether the next frame to be decrypted has been - // completely received yet. - if len(framedMsg) == 0 { - copy(p.protected, p.nextFrame) - p.protected = p.protected[:len(p.nextFrame)] - // Always copy next incomplete frame to the beginning of - // the protected buffer and reset nextFrame to it. - p.nextFrame = p.protected - } - // Check whether a complete frame has been received yet. - for len(framedMsg) == 0 { - if len(p.protected) == cap(p.protected) { - tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength) - copy(tmp, p.protected) - p.protected = tmp - } - n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)]) - if err != nil { - return 0, err - } - p.protected = p.protected[:len(p.protected)+n] - framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit) - if err != nil { - return 0, err - } - } - // Now we have a complete frame, decrypted it. 
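// Illustrative sketch (not part of the vendored gRPC code above): with the
// constants above and a 16-byte GCM tag, NewConn computes 24 bytes of
// per-frame overhead, so a default 4 KiB frame carries at most 4072 payload
// bytes and the pre-allocated protected buffer holds 2*4096-1 bytes. A quick
// check of that arithmetic:
package main

import "fmt"

const (
	msgLenFieldSize         = 4    // frame length field
	msgTypeFieldSize        = 4    // frame message type field
	gcmTagSize              = 16   // AES-GCM authentication tag
	altsRecordDefaultLength = 4096 // default frame size (4 KiB)
)

func main() {
	overhead := msgLenFieldSize + msgTypeFieldSize + gcmTagSize
	payloadLimit := altsRecordDefaultLength - overhead
	protectedCap := 2*altsRecordDefaultLength - 1
	fmt.Println(overhead, payloadLimit, protectedCap) // 24 4072 8191
}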
- msg := framedMsg[MsgLenFieldSize:] - msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) - if msgType&0xff != altsRecordMsgType { - return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", - msgType, altsRecordMsgType) - } - ciphertext := msg[msgTypeFieldSize:] - - // Decrypt requires that if the dst and ciphertext alias, they - // must alias exactly. Code here used to use msg[:0], but msg - // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than - // ciphertext, so they alias inexactly. Using ciphertext[:0] - // arranges the appropriate aliasing without needing to copy - // ciphertext or use a separate destination buffer. For more info - // check: https://golang.org/pkg/crypto/cipher/#AEAD. - p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) - if err != nil { - return 0, err - } - } - - n = copy(b, p.buf) - p.buf = p.buf[n:] - return n, nil -} - -// Write encrypts, frames, and writes bytes from b to the underlying connection. -func (p *conn) Write(b []byte) (n int, err error) { - n = len(b) - // Calculate the output buffer size with framing and encryption overhead. - numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) - size := len(b) + numOfFrames*p.overhead - // If writeBuf is too small, increase its size up to the maximum size. - partialBSize := len(b) - if size > altsWriteBufferMaxSize { - size = altsWriteBufferMaxSize - const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength - partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit - } - if len(p.writeBuf) < size { - p.writeBuf = make([]byte, size) - } - - for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { - partialBEnd := partialBStart + partialBSize - if partialBEnd > len(b) { - partialBEnd = len(b) - } - partialB := b[partialBStart:partialBEnd] - writeBufIndex := 0 - for len(partialB) > 0 { - payloadLen := len(partialB) - if payloadLen > p.payloadLengthLimit { - payloadLen = p.payloadLengthLimit - } - buf := partialB[:payloadLen] - partialB = partialB[payloadLen:] - - // Write buffer contains: length, type, payload, and tag - // if any. - - // 1. Fill in type field. - msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] - binary.LittleEndian.PutUint32(msg, altsRecordMsgType) - - // 2. Encrypt the payload and create a tag if any. - msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) - if err != nil { - return n, err - } - - // 3. Fill in the size field. - binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) - - // 4. Increase writeBufIndex. - writeBufIndex += len(buf) + p.overhead - } - nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) - if err != nil { - // We need to calculate the actual data size that was - // written. This means we need to remove header, - // encryption overheads, and any partially-written - // frame data. - numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) - return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err - } - } - return n, nil -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go deleted file mode 100644 index 84821fa254..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. 
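// Illustrative sketch (not part of the vendored gRPC code above): Write sizes
// its scratch buffer as payload plus per-frame overhead, caps it at the
// 512 KiB maximum, and then processes the input in chunks of at most 128 full
// frames per pass (128 * 4072 payload bytes). The same arithmetic for a
// hypothetical 1 MiB application write:
package main

import (
	"fmt"
	"math"
)

const (
	overhead                = 24              // length + type + GCM tag
	payloadLengthLimit      = 4096 - overhead // 4072 payload bytes per frame
	altsRecordDefaultLength = 4096
	altsWriteBufferMaxSize  = 512 * 1024
)

func main() {
	b := 1 << 20 // 1 MiB application write
	numOfFrames := int(math.Ceil(float64(b) / float64(payloadLengthLimit)))
	size := b + numOfFrames*overhead
	partialBSize := b
	if size > altsWriteBufferMaxSize {
		size = altsWriteBufferMaxSize
		partialBSize = (altsWriteBufferMaxSize / altsRecordDefaultLength) * payloadLengthLimit
	}
	fmt.Println(numOfFrames, size, partialBSize) // 258 524288 521216
}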
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import core "google.golang.org/grpc/credentials/alts/internal" - -// NewOutCounter returns an outgoing counter initialized to the starting sequence -// number for the client/server side of a connection. -func NewOutCounter(s core.Side, overflowLen int) (c Counter) { - c.overflowLen = overflowLen - if s == core.ServerSide { - // Server counters in ALTS record have the little-endian high bit - // set. - c.value[counterLen-1] = 0x80 - } - return -} - -// NewInCounter returns an incoming counter initialized to the starting sequence -// number for the client/server side of a connection. This is used in ALTS record -// to check that incoming counters are as expected, since ALTS record guarantees -// that messages are unwrapped in the same order that the peer wrapped them. -func NewInCounter(s core.Side, overflowLen int) (c Counter) { - c.overflowLen = overflowLen - if s == core.ClientSide { - // Server counters in ALTS record have the little-endian high bit - // set. - c.value[counterLen-1] = 0x80 - } - return -} - -// CounterFromValue creates a new counter given an initial value. -func CounterFromValue(value []byte, overflowLen int) (c Counter) { - c.overflowLen = overflowLen - copy(c.value[:], value) - return -} - -// CounterSide returns the connection side (client/server) a sequence counter is -// associated with. -func CounterSide(c []byte) core.Side { - if c[counterLen-1]&0x80 == 0x80 { - return core.ServerSide - } - return core.ClientSide -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go deleted file mode 100644 index 8bc7ceee0a..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ /dev/null @@ -1,375 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package handshaker provides ALTS handshaking functionality for GCP. 
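// Illustrative sketch (not part of the vendored gRPC code above): the counter
// helpers above separate client and server sequence spaces by the high bit of
// the final counter byte (0x80 set means server side), which is exactly what
// CounterSide checks. isServerCounter is a hypothetical name.
package main

import "fmt"

const counterLen = 12

func isServerCounter(c [counterLen]byte) bool {
	return c[counterLen-1]&0x80 == 0x80
}

func main() {
	var clientCtr, serverCtr [counterLen]byte
	serverCtr[counterLen-1] = 0x80 // same initialization NewOutCounter applies on the server side
	fmt.Println(isServerCounter(clientCtr), isServerCounter(serverCtr)) // false true
}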
-package handshaker - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "sync" - - grpc "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - core "google.golang.org/grpc/credentials/alts/internal" - "google.golang.org/grpc/credentials/alts/internal/authinfo" - "google.golang.org/grpc/credentials/alts/internal/conn" - altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" - altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" -) - -const ( - // The maximum byte size of receive frames. - frameLimit = 64 * 1024 // 64 KB - rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. - maxPendingHandshakes = 100 -) - -var ( - hsProtocol = altspb.HandshakeProtocol_ALTS - appProtocols = []string{"grpc"} - recordProtocols = []string{rekeyRecordProtocolName} - keyLength = map[string]int{ - rekeyRecordProtocolName: 44, - } - altsRecordFuncs = map[string]conn.ALTSRecordFunc{ - // ALTS handshaker protocols. - rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) { - return conn.NewAES128GCMRekey(s, keyData) - }, - } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) - // errDropped occurs when maxPendingHandshakes is reached. - errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") - // errOutOfBound occurs when the handshake service returns a consumed - // bytes value larger than the buffer that was passed to it originally. - errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound") -) - -func init() { - for protocol, f := range altsRecordFuncs { - if err := conn.RegisterProtocol(protocol, f); err != nil { - panic(err) - } - } -} - -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - -// ClientHandshakerOptions contains the client handshaker options that can -// provided by the caller. -type ClientHandshakerOptions struct { - // ClientIdentity is the handshaker client local identity. - ClientIdentity *altspb.Identity - // TargetName is the server service account name for secure name - // checking. - TargetName string - // TargetServiceAccounts contains a list of expected target service - // accounts. One of these accounts should match one of the accounts in - // the handshaker results. Otherwise, the handshake fails. - TargetServiceAccounts []string - // RPCVersions specifies the gRPC versions accepted by the client. - RPCVersions *altspb.RpcProtocolVersions -} - -// ServerHandshakerOptions contains the server handshaker options that can -// provided by the caller. -type ServerHandshakerOptions struct { - // RPCVersions specifies the gRPC versions accepted by the server. - RPCVersions *altspb.RpcProtocolVersions -} - -// DefaultClientHandshakerOptions returns the default client handshaker options. 
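// Illustrative sketch (not part of the vendored gRPC code above): acquire and
// release above cap concurrent handshakes at maxPendingHandshakes by guarding
// a counter with a mutex; a caller that fails acquire aborts with errDropped.
// handshakeLimiter is a hypothetical, self-contained model of that pattern.
package main

import (
	"fmt"
	"sync"
)

type handshakeLimiter struct {
	mu      sync.Mutex
	current int64
	max     int64
}

func (l *handshakeLimiter) acquire() bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.max-l.current < 1 {
		return false // the real code surfaces this as errDropped
	}
	l.current++
	return true
}

func (l *handshakeLimiter) release() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.current--
	if l.current < 0 {
		panic("bad release")
	}
}

func main() {
	l := &handshakeLimiter{max: 2}
	fmt.Println(l.acquire(), l.acquire(), l.acquire()) // true true false
	l.release()
	fmt.Println(l.acquire()) // true
}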
-func DefaultClientHandshakerOptions() *ClientHandshakerOptions { - return &ClientHandshakerOptions{} -} - -// DefaultServerHandshakerOptions returns the default client handshaker options. -func DefaultServerHandshakerOptions() *ServerHandshakerOptions { - return &ServerHandshakerOptions{} -} - -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - -// altsHandshaker is used to complete a ALTS handshaking between client and -// server. This handshaker talks to the ALTS handshaker service in the metadata -// server. -type altsHandshaker struct { - // RPC stream used to access the ALTS Handshaker service. - stream altsgrpc.HandshakerService_DoHandshakeClient - // the connection to the peer. - conn net.Conn - // client handshake options. - clientOpts *ClientHandshakerOptions - // server handshake options. - serverOpts *ServerHandshakerOptions - // defines the side doing the handshake, client or server. - side core.Side -} - -// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker -// service in the metadata server. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return &altsHandshaker{ - stream: stream, - conn: c, - clientOpts: opts, - side: core.ClientSide, - }, nil -} - -// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker -// service in the metadata server. -func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return &altsHandshaker{ - stream: stream, - conn: c, - serverOpts: opts, - side: core.ServerSide, - }, nil -} - -// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once -// done, ClientHandshake returns a secure connection. -func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { - return nil, nil, errDropped - } - defer release() - - if h.side != core.ClientSide { - return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") - } - - // Create target identities from service account list. 
- targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) - for _, account := range h.clientOpts.TargetServiceAccounts { - targetIdentities = append(targetIdentities, &altspb.Identity{ - IdentityOneof: &altspb.Identity_ServiceAccount{ - ServiceAccount: account, - }, - }) - } - req := &altspb.HandshakerReq{ - ReqOneof: &altspb.HandshakerReq_ClientStart{ - ClientStart: &altspb.StartClientHandshakeReq{ - HandshakeSecurityProtocol: hsProtocol, - ApplicationProtocols: appProtocols, - RecordProtocols: recordProtocols, - TargetIdentities: targetIdentities, - LocalIdentity: h.clientOpts.ClientIdentity, - TargetName: h.clientOpts.TargetName, - RpcVersions: h.clientOpts.RPCVersions, - }, - }, - } - - conn, result, err := h.doHandshake(req) - if err != nil { - return nil, nil, err - } - authInfo := authinfo.New(result) - return conn, authInfo, nil -} - -// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once -// done, ServerHandshake returns a secure connection. -func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { - return nil, nil, errDropped - } - defer release() - - if h.side != core.ServerSide { - return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") - } - - p := make([]byte, frameLimit) - n, err := h.conn.Read(p) - if err != nil { - return nil, nil, err - } - - // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. - params := make(map[int32]*altspb.ServerHandshakeParameters) - params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ - RecordProtocols: recordProtocols, - } - req := &altspb.HandshakerReq{ - ReqOneof: &altspb.HandshakerReq_ServerStart{ - ServerStart: &altspb.StartServerHandshakeReq{ - ApplicationProtocols: appProtocols, - HandshakeParameters: params, - InBytes: p[:n], - RpcVersions: h.serverOpts.RPCVersions, - }, - }, - } - - conn, result, err := h.doHandshake(req) - if err != nil { - return nil, nil, err - } - authInfo := authinfo.New(result) - return conn, authInfo, nil -} - -func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) { - resp, err := h.accessHandshakerService(req) - if err != nil { - return nil, nil, err - } - // Check of the returned status is an error. - if resp.GetStatus() != nil { - if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { - return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) - } - } - - var extra []byte - if req.GetServerStart() != nil { - if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { - return nil, nil, errOutOfBound - } - extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] - } - result, extra, err := h.processUntilDone(resp, extra) - if err != nil { - return nil, nil, err - } - // The handshaker returns a 128 bytes key. It should be truncated based - // on the returned record protocol. 
- keyLen, ok := keyLength[result.RecordProtocol] - if !ok { - return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol) - } - sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra) - if err != nil { - return nil, nil, err - } - return sc, result, nil -} - -func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) { - if err := h.stream.Send(req); err != nil { - return nil, err - } - resp, err := h.stream.Recv() - if err != nil { - return nil, err - } - return resp, nil -} - -// processUntilDone processes the handshake until the handshaker service returns -// the results. Handshaker service takes care of frame parsing, so we read -// whatever received from the network and send it to the handshaker service. -func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) { - for { - if len(resp.OutFrames) > 0 { - if _, err := h.conn.Write(resp.OutFrames); err != nil { - return nil, nil, err - } - } - if resp.Result != nil { - return resp.Result, extra, nil - } - buf := make([]byte, frameLimit) - n, err := h.conn.Read(buf) - if err != nil && err != io.EOF { - return nil, nil, err - } - // If there is nothing to send to the handshaker service, and - // nothing is received from the peer, then we are stuck. - // This covers the case when the peer is not responding. Note - // that handshaker service connection issues are caught in - // accessHandshakerService before we even get here. - if len(resp.OutFrames) == 0 && n == 0 { - return nil, nil, core.PeerNotRespondingError - } - // Append extra bytes from the previous interaction with the - // handshaker service with the current buffer read from conn. - p := append(extra, buf[:n]...) - // From here on, p and extra point to the same slice. - resp, err = h.accessHandshakerService(&altspb.HandshakerReq{ - ReqOneof: &altspb.HandshakerReq_Next{ - Next: &altspb.NextHandshakeMessageReq{ - InBytes: p, - }, - }, - }) - if err != nil { - return nil, nil, err - } - // Set extra based on handshaker service response. - if resp.GetBytesConsumed() > uint32(len(p)) { - return nil, nil, errOutOfBound - } - extra = p[resp.GetBytesConsumed():] - } -} - -// Close terminates the Handshaker. It should be called when the caller obtains -// the secure connection. -func (h *altsHandshaker) Close() { - h.stream.CloseSend() -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go deleted file mode 100644 index 77d759cd95..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
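// Illustrative sketch (not part of the vendored gRPC code above): two pieces
// of bookkeeping from doHandshake/processUntilDone — the handshaker service's
// bytes_consumed must not exceed what was sent (otherwise errOutOfBound), and
// the 128 bytes of key material it returns are truncated to the negotiated
// record protocol's key length (44 bytes for the rekey protocol).
// leftoverBytes is a hypothetical name.
package main

import (
	"errors"
	"fmt"
)

var errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound")

// leftoverBytes returns the bytes the handshaker service did not consume.
func leftoverBytes(sent []byte, consumed uint32) ([]byte, error) {
	if consumed > uint32(len(sent)) {
		return nil, errOutOfBound
	}
	return sent[consumed:], nil
}

func main() {
	extra, err := leftoverBytes(make([]byte, 100), 60)
	fmt.Println(len(extra), err) // 40 <nil>

	keyData := make([]byte, 128) // key material returned by the service
	key := keyData[:44]          // ALTSRP_GCM_AES128_REKEY key length
	fmt.Println(len(key))        // 44
}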
- * - */ - -// Package service manages connections between the VM application and the ALTS -// handshaker service. -package service - -import ( - "sync" - - grpc "google.golang.org/grpc" -) - -var ( - // mu guards hsConnMap and hsDialer. - mu sync.Mutex - // hsConn represents a mapping from a hypervisor handshaker service address - // to a corresponding connection to a hypervisor handshaker service - // instance. - hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial -) - -// Dial dials the handshake service in the hypervisor. If a connection has -// already been established, this function returns it. Otherwise, a new -// connection is created. -func Dial(hsAddress string) (*grpc.ClientConn, error) { - mu.Lock() - defer mu.Unlock() - - hsConn, ok := hsConnMap[hsAddress] - if !ok { - // Create a new connection to the handshaker service. Note that - // this connection stays open until the application is closed. - var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) - if err != nil { - return nil, err - } - hsConnMap[hsAddress] = hsConn - } - return hsConn, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go deleted file mode 100644 index 703b48da75..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/altscontext.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/gcp/altscontext.proto - -package grpc_gcp - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -type AltsContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this connection. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The record protocol negotiated for this connection. 
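// Illustrative sketch (not part of the vendored gRPC code above): service.Dial
// above keeps one long-lived connection per handshaker address in a
// mutex-guarded map. The same caching shape with the gRPC specifics replaced
// by a hypothetical conn type and dialer; the address string is a placeholder.
package main

import (
	"fmt"
	"sync"
)

type conn struct{ addr string }

var (
	mu      sync.Mutex
	connMap = make(map[string]*conn)
	dialer  = func(addr string) (*conn, error) { return &conn{addr: addr}, nil }
)

// dialCached returns the cached connection for addr, dialing at most once.
func dialCached(addr string) (*conn, error) {
	mu.Lock()
	defer mu.Unlock()
	if c, ok := connMap[addr]; ok {
		return c, nil
	}
	c, err := dialer(addr)
	if err != nil {
		return nil, err
	}
	connMap[addr] = c
	return c, nil
}

func main() {
	a, _ := dialCached("handshaker-address:1234") // placeholder address
	b, _ := dialCached("handshaker-address:1234")
	fmt.Println(a == b) // true: the second call reuses the cached connection
}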
- RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` - // The security level of the created secure channel. - SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` - // The peer service account. - PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` - // The local service account. - LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` - // The RPC protocol versions supported by the peer. - PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` - // Additional attributes of the peer. - PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *AltsContext) Reset() { - *x = AltsContext{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AltsContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AltsContext) ProtoMessage() {} - -func (x *AltsContext) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AltsContext.ProtoReflect.Descriptor instead. 
-func (*AltsContext) Descriptor() ([]byte, []int) { - return file_grpc_gcp_altscontext_proto_rawDescGZIP(), []int{0} -} - -func (x *AltsContext) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *AltsContext) GetRecordProtocol() string { - if x != nil { - return x.RecordProtocol - } - return "" -} - -func (x *AltsContext) GetSecurityLevel() SecurityLevel { - if x != nil { - return x.SecurityLevel - } - return SecurityLevel_SECURITY_NONE -} - -func (x *AltsContext) GetPeerServiceAccount() string { - if x != nil { - return x.PeerServiceAccount - } - return "" -} - -func (x *AltsContext) GetLocalServiceAccount() string { - if x != nil { - return x.LocalServiceAccount - } - return "" -} - -func (x *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.PeerRpcVersions - } - return nil -} - -func (x *AltsContext) GetPeerAttributes() map[string]string { - if x != nil { - return x.PeerAttributes - } - return nil -} - -var File_grpc_gcp_altscontext_proto protoreflect.FileDescriptor - -var file_grpc_gcp_altscontext_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, - 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xf1, 0x03, 0x0a, 0x0b, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, - 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0e, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0d, 0x73, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x30, 0x0a, 0x14, - 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x65, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, - 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 
0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, - 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, - 0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x50, 0x65, - 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0x6c, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x10, 0x41, - 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, - 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_gcp_altscontext_proto_rawDescOnce sync.Once - file_grpc_gcp_altscontext_proto_rawDescData = file_grpc_gcp_altscontext_proto_rawDesc -) - -func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { - file_grpc_gcp_altscontext_proto_rawDescOnce.Do(func() { - file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_altscontext_proto_rawDescData) - }) - return file_grpc_gcp_altscontext_proto_rawDescData -} - -var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ - (*AltsContext)(nil), // 0: grpc.gcp.AltsContext - nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry - (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel - (*RpcProtocolVersions)(nil), // 3: grpc.gcp.RpcProtocolVersions -} -var file_grpc_gcp_altscontext_proto_depIdxs = []int32{ - 2, // 0: grpc.gcp.AltsContext.security_level:type_name -> grpc.gcp.SecurityLevel - 3, // 1: grpc.gcp.AltsContext.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 1, // 2: grpc.gcp.AltsContext.peer_attributes:type_name -> grpc.gcp.AltsContext.PeerAttributesEntry - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_grpc_gcp_altscontext_proto_init() } -func file_grpc_gcp_altscontext_proto_init() { - if File_grpc_gcp_altscontext_proto != nil { - return - } - file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - 
file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AltsContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_gcp_altscontext_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_gcp_altscontext_proto_goTypes, - DependencyIndexes: file_grpc_gcp_altscontext_proto_depIdxs, - MessageInfos: file_grpc_gcp_altscontext_proto_msgTypes, - }.Build() - File_grpc_gcp_altscontext_proto = out.File - file_grpc_gcp_altscontext_proto_rawDesc = nil - file_grpc_gcp_altscontext_proto_goTypes = nil - file_grpc_gcp_altscontext_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go deleted file mode 100644 index 40570e9bf2..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ /dev/null @@ -1,1426 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/gcp/handshaker.proto - -package grpc_gcp - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -type HandshakeProtocol int32 - -const ( - // Default value. - HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 - // TLS handshake protocol. - HandshakeProtocol_TLS HandshakeProtocol = 1 - // Application Layer Transport Security handshake protocol. - HandshakeProtocol_ALTS HandshakeProtocol = 2 -) - -// Enum value maps for HandshakeProtocol. 
-var ( - HandshakeProtocol_name = map[int32]string{ - 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", - 1: "TLS", - 2: "ALTS", - } - HandshakeProtocol_value = map[string]int32{ - "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, - "TLS": 1, - "ALTS": 2, - } -) - -func (x HandshakeProtocol) Enum() *HandshakeProtocol { - p := new(HandshakeProtocol) - *p = x - return p -} - -func (x HandshakeProtocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HandshakeProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_gcp_handshaker_proto_enumTypes[0].Descriptor() -} - -func (HandshakeProtocol) Type() protoreflect.EnumType { - return &file_grpc_gcp_handshaker_proto_enumTypes[0] -} - -func (x HandshakeProtocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HandshakeProtocol.Descriptor instead. -func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} -} - -type NetworkProtocol int32 - -const ( - NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 - NetworkProtocol_TCP NetworkProtocol = 1 - NetworkProtocol_UDP NetworkProtocol = 2 -) - -// Enum value maps for NetworkProtocol. -var ( - NetworkProtocol_name = map[int32]string{ - 0: "NETWORK_PROTOCOL_UNSPECIFIED", - 1: "TCP", - 2: "UDP", - } - NetworkProtocol_value = map[string]int32{ - "NETWORK_PROTOCOL_UNSPECIFIED": 0, - "TCP": 1, - "UDP": 2, - } -) - -func (x NetworkProtocol) Enum() *NetworkProtocol { - p := new(NetworkProtocol) - *p = x - return p -} - -func (x NetworkProtocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NetworkProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_gcp_handshaker_proto_enumTypes[1].Descriptor() -} - -func (NetworkProtocol) Type() protoreflect.EnumType { - return &file_grpc_gcp_handshaker_proto_enumTypes[1] -} - -func (x NetworkProtocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NetworkProtocol.Descriptor instead. -func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} -} - -type Endpoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // IP address. It should contain an IPv4 or IPv6 string literal, e.g. - // "192.168.0.1" or "2001:db8::1". - IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` - // Port number. - Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
- Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` -} - -func (x *Endpoint) Reset() { - *x = Endpoint{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Endpoint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Endpoint) ProtoMessage() {} - -func (x *Endpoint) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. -func (*Endpoint) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} -} - -func (x *Endpoint) GetIpAddress() string { - if x != nil { - return x.IpAddress - } - return "" -} - -func (x *Endpoint) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *Endpoint) GetProtocol() NetworkProtocol { - if x != nil { - return x.Protocol - } - return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED -} - -type Identity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to IdentityOneof: - // *Identity_ServiceAccount - // *Identity_Hostname - IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` - // Additional attributes of the identity. - Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Identity) Reset() { - *x = Identity{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Identity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Identity) ProtoMessage() {} - -func (x *Identity) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Identity.ProtoReflect.Descriptor instead. -func (*Identity) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} -} - -func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { - if m != nil { - return m.IdentityOneof - } - return nil -} - -func (x *Identity) GetServiceAccount() string { - if x, ok := x.GetIdentityOneof().(*Identity_ServiceAccount); ok { - return x.ServiceAccount - } - return "" -} - -func (x *Identity) GetHostname() string { - if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { - return x.Hostname - } - return "" -} - -func (x *Identity) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -type isIdentity_IdentityOneof interface { - isIdentity_IdentityOneof() -} - -type Identity_ServiceAccount struct { - // Service account of a connection endpoint. 
- ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` -} - -type Identity_Hostname struct { - // Hostname of a connection endpoint. - Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` -} - -func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} - -func (*Identity_Hostname) isIdentity_IdentityOneof() {} - -type StartClientHandshakeReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Handshake security protocol requested by the client. - HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` - // The application protocols supported by the client, e.g., "h2" (for http2), - // "grpc". - ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // The record protocols supported by the client, e.g., - // "ALTSRP_GCM_AES128". - RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` - // (Optional) Describes which server identities are acceptable by the client. - // If target identities are provided and none of them matches the peer - // identity of the server, handshake will fail. - TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` - // (Optional) Application may specify a local identity. Otherwise, the - // handshaker chooses a default local identity. - LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // (Optional) Local endpoint information of the connection to the server, - // such as local IP address, port number, and network protocol. - LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` - // (Optional) Endpoint information of the remote server, such as IP address, - // port number, and network protocol. - RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` - // (Optional) If target name is provided, a secure naming check is performed - // to verify that the peer authenticated identity is indeed authorized to run - // the target name. - TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` - // (Optional) RPC protocol versions supported by the client. - RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` - // (Optional) Maximum frame size supported by the client. 
- MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` -} - -func (x *StartClientHandshakeReq) Reset() { - *x = StartClientHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartClientHandshakeReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartClientHandshakeReq) ProtoMessage() {} - -func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartClientHandshakeReq.ProtoReflect.Descriptor instead. -func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{2} -} - -func (x *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { - if x != nil { - return x.HandshakeSecurityProtocol - } - return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED -} - -func (x *StartClientHandshakeReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *StartClientHandshakeReq) GetRecordProtocols() []string { - if x != nil { - return x.RecordProtocols - } - return nil -} - -func (x *StartClientHandshakeReq) GetTargetIdentities() []*Identity { - if x != nil { - return x.TargetIdentities - } - return nil -} - -func (x *StartClientHandshakeReq) GetLocalIdentity() *Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { - if x != nil { - return x.LocalEndpoint - } - return nil -} - -func (x *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { - if x != nil { - return x.RemoteEndpoint - } - return nil -} - -func (x *StartClientHandshakeReq) GetTargetName() string { - if x != nil { - return x.TargetName - } - return "" -} - -func (x *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.RpcVersions - } - return nil -} - -func (x *StartClientHandshakeReq) GetMaxFrameSize() uint32 { - if x != nil { - return x.MaxFrameSize - } - return 0 -} - -type ServerHandshakeParameters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The record protocols supported by the server, e.g., - // "ALTSRP_GCM_AES128". - RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` - // (Optional) A list of local identities supported by the server, if - // specified. Otherwise, the handshaker chooses a default local identity. 
- LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` -} - -func (x *ServerHandshakeParameters) Reset() { - *x = ServerHandshakeParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerHandshakeParameters) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerHandshakeParameters) ProtoMessage() {} - -func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerHandshakeParameters.ProtoReflect.Descriptor instead. -func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{3} -} - -func (x *ServerHandshakeParameters) GetRecordProtocols() []string { - if x != nil { - return x.RecordProtocols - } - return nil -} - -func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity { - if x != nil { - return x.LocalIdentities - } - return nil -} - -type StartServerHandshakeReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocols supported by the server, e.g., "h2" (for http2), - // "grpc". - ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // Handshake parameters (record protocols and local identities supported by - // the server) mapped by the handshake protocol. Each handshake security - // protocol (e.g., TLS or ALTS) has its own set of record protocols and local - // identities. Since protobuf does not support enum as key to the map, the key - // to handshake_parameters is the integer value of HandshakeProtocol enum. - HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. - InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` - // (Optional) Local endpoint information of the connection to the client, - // such as local IP address, port number, and network protocol. - LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` - // (Optional) Endpoint information of the remote client, such as IP address, - // port number, and network protocol. - RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` - // (Optional) RPC protocol versions supported by the server. - RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` - // (Optional) Maximum frame size supported by the server. 
- MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` -} - -func (x *StartServerHandshakeReq) Reset() { - *x = StartServerHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartServerHandshakeReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartServerHandshakeReq) ProtoMessage() {} - -func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartServerHandshakeReq.ProtoReflect.Descriptor instead. -func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{4} -} - -func (x *StartServerHandshakeReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { - if x != nil { - return x.HandshakeParameters - } - return nil -} - -func (x *StartServerHandshakeReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -func (x *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { - if x != nil { - return x.LocalEndpoint - } - return nil -} - -func (x *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { - if x != nil { - return x.RemoteEndpoint - } - return nil -} - -func (x *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.RpcVersions - } - return nil -} - -func (x *StartServerHandshakeReq) GetMaxFrameSize() uint32 { - if x != nil { - return x.MaxFrameSize - } - return 0 -} - -type NextHandshakeMessageReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple NextHandshakerMessageReq - // messages. - InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *NextHandshakeMessageReq) Reset() { - *x = NextHandshakeMessageReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NextHandshakeMessageReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NextHandshakeMessageReq) ProtoMessage() {} - -func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NextHandshakeMessageReq.ProtoReflect.Descriptor instead. 
-func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{5} -} - -func (x *NextHandshakeMessageReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type HandshakerReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ReqOneof: - // *HandshakerReq_ClientStart - // *HandshakerReq_ServerStart - // *HandshakerReq_Next - ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` -} - -func (x *HandshakerReq) Reset() { - *x = HandshakerReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerReq) ProtoMessage() {} - -func (x *HandshakerReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerReq.ProtoReflect.Descriptor instead. -func (*HandshakerReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{6} -} - -func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { - if m != nil { - return m.ReqOneof - } - return nil -} - -func (x *HandshakerReq) GetClientStart() *StartClientHandshakeReq { - if x, ok := x.GetReqOneof().(*HandshakerReq_ClientStart); ok { - return x.ClientStart - } - return nil -} - -func (x *HandshakerReq) GetServerStart() *StartServerHandshakeReq { - if x, ok := x.GetReqOneof().(*HandshakerReq_ServerStart); ok { - return x.ServerStart - } - return nil -} - -func (x *HandshakerReq) GetNext() *NextHandshakeMessageReq { - if x, ok := x.GetReqOneof().(*HandshakerReq_Next); ok { - return x.Next - } - return nil -} - -type isHandshakerReq_ReqOneof interface { - isHandshakerReq_ReqOneof() -} - -type HandshakerReq_ClientStart struct { - // The start client handshake request message. - ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` -} - -type HandshakerReq_ServerStart struct { - // The start server handshake request message. - ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` -} - -type HandshakerReq_Next struct { - // The next handshake request message. - Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` -} - -func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} - -func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} - -func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} - -type HandshakerResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this connection. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The record protocol negotiated for this connection. - RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` - // Cryptographic key data. 
The key data may be more than the key length - // required for the record protocol, thus the client of the handshaker - // service needs to truncate the key data into the right key length. - KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` - // The authenticated identity of the peer. - PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` - // The local identity used in the handshake. - LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // Indicate whether the handshaker service client should keep the channel - // between the handshaker service open, e.g., in order to handle - // post-handshake messages in the future. - KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` - // The RPC protocol versions supported by the peer. - PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` - // The maximum frame size of the peer. - MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` -} - -func (x *HandshakerResult) Reset() { - *x = HandshakerResult{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerResult) ProtoMessage() {} - -func (x *HandshakerResult) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerResult.ProtoReflect.Descriptor instead. -func (*HandshakerResult) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{7} -} - -func (x *HandshakerResult) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *HandshakerResult) GetRecordProtocol() string { - if x != nil { - return x.RecordProtocol - } - return "" -} - -func (x *HandshakerResult) GetKeyData() []byte { - if x != nil { - return x.KeyData - } - return nil -} - -func (x *HandshakerResult) GetPeerIdentity() *Identity { - if x != nil { - return x.PeerIdentity - } - return nil -} - -func (x *HandshakerResult) GetLocalIdentity() *Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *HandshakerResult) GetKeepChannelOpen() bool { - if x != nil { - return x.KeepChannelOpen - } - return false -} - -func (x *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.PeerRpcVersions - } - return nil -} - -func (x *HandshakerResult) GetMaxFrameSize() uint32 { - if x != nil { - return x.MaxFrameSize - } - return 0 -} - -type HandshakerStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code. This could be the gRPC status code. - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // The status details. 
- Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` -} - -func (x *HandshakerStatus) Reset() { - *x = HandshakerStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerStatus) ProtoMessage() {} - -func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerStatus.ProtoReflect.Descriptor instead. -func (*HandshakerStatus) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{8} -} - -func (x *HandshakerStatus) GetCode() uint32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *HandshakerStatus) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -type HandshakerResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Frames to be given to the peer for the NextHandshakeMessageReq. May be - // empty if no out_frames have to be sent to the peer or if in_bytes in the - // HandshakerReq are incomplete. All the non-empty out frames must be sent to - // the peer even if the handshaker status is not OK as these frames may - // contain the alert frames. - OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` - // Number of bytes in the in_bytes consumed by the handshaker. It is possible - // that part of in_bytes in HandshakerReq was unrelated to the handshake - // process. - BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` - // This is set iff the handshake was successful. out_frames may still be set - // to frames that needs to be forwarded to the peer. - Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` - // Status of the handshaker. - Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *HandshakerResp) Reset() { - *x = HandshakerResp{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerResp) ProtoMessage() {} - -func (x *HandshakerResp) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerResp.ProtoReflect.Descriptor instead. 
-func (*HandshakerResp) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{9} -} - -func (x *HandshakerResp) GetOutFrames() []byte { - if x != nil { - return x.OutFrames - } - return nil -} - -func (x *HandshakerResp) GetBytesConsumed() uint32 { - if x != nil { - return x.BytesConsumed - } - return 0 -} - -func (x *HandshakerResp) GetResult() *HandshakerResult { - if x != nil { - return x.Result - } - return nil -} - -func (x *HandshakerResp) GetStatus() *HandshakerStatus { - if x != nil { - return x.Status - } - return nil -} - -var File_grpc_gcp_handshaker_proto protoreflect.FileDescriptor - -var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x74, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, - 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x35, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x12, 0x29, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, - 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, - 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xd3, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, - 0x68, 0x61, 
0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x19, - 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x29, - 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x11, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, - 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x40, - 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 
0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, - 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa5, - 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, - 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, - 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 
0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, - 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, - 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, - 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, - 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, - 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, - 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 
0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, - 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, - 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, - 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, - 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, - 0x52, 0x4f, 
0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, - 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, - 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, - 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, - 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_gcp_handshaker_proto_rawDescOnce sync.Once - file_grpc_gcp_handshaker_proto_rawDescData = file_grpc_gcp_handshaker_proto_rawDesc -) - -func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { - file_grpc_gcp_handshaker_proto_rawDescOnce.Do(func() { - file_grpc_gcp_handshaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_handshaker_proto_rawDescData) - }) - return file_grpc_gcp_handshaker_proto_rawDescData -} - -var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ - (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol - (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol - (*Endpoint)(nil), // 2: grpc.gcp.Endpoint - (*Identity)(nil), // 3: grpc.gcp.Identity - (*StartClientHandshakeReq)(nil), // 4: grpc.gcp.StartClientHandshakeReq - (*ServerHandshakeParameters)(nil), // 5: grpc.gcp.ServerHandshakeParameters - (*StartServerHandshakeReq)(nil), // 6: grpc.gcp.StartServerHandshakeReq - (*NextHandshakeMessageReq)(nil), // 7: grpc.gcp.NextHandshakeMessageReq - (*HandshakerReq)(nil), // 8: grpc.gcp.HandshakerReq - (*HandshakerResult)(nil), // 9: grpc.gcp.HandshakerResult - (*HandshakerStatus)(nil), // 10: grpc.gcp.HandshakerStatus - (*HandshakerResp)(nil), // 11: grpc.gcp.HandshakerResp - nil, // 12: grpc.gcp.Identity.AttributesEntry - nil, // 13: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry - (*RpcProtocolVersions)(nil), // 14: grpc.gcp.RpcProtocolVersions -} -var file_grpc_gcp_handshaker_proto_depIdxs = []int32{ - 1, // 0: grpc.gcp.Endpoint.protocol:type_name -> grpc.gcp.NetworkProtocol - 12, // 1: grpc.gcp.Identity.attributes:type_name -> grpc.gcp.Identity.AttributesEntry - 0, // 2: grpc.gcp.StartClientHandshakeReq.handshake_security_protocol:type_name -> grpc.gcp.HandshakeProtocol - 3, // 3: grpc.gcp.StartClientHandshakeReq.target_identities:type_name -> grpc.gcp.Identity - 3, // 4: 
grpc.gcp.StartClientHandshakeReq.local_identity:type_name -> grpc.gcp.Identity - 2, // 5: grpc.gcp.StartClientHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint - 2, // 6: grpc.gcp.StartClientHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint - 14, // 7: grpc.gcp.StartClientHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 3, // 8: grpc.gcp.ServerHandshakeParameters.local_identities:type_name -> grpc.gcp.Identity - 13, // 9: grpc.gcp.StartServerHandshakeReq.handshake_parameters:type_name -> grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry - 2, // 10: grpc.gcp.StartServerHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint - 2, // 11: grpc.gcp.StartServerHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint - 14, // 12: grpc.gcp.StartServerHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 4, // 13: grpc.gcp.HandshakerReq.client_start:type_name -> grpc.gcp.StartClientHandshakeReq - 6, // 14: grpc.gcp.HandshakerReq.server_start:type_name -> grpc.gcp.StartServerHandshakeReq - 7, // 15: grpc.gcp.HandshakerReq.next:type_name -> grpc.gcp.NextHandshakeMessageReq - 3, // 16: grpc.gcp.HandshakerResult.peer_identity:type_name -> grpc.gcp.Identity - 3, // 17: grpc.gcp.HandshakerResult.local_identity:type_name -> grpc.gcp.Identity - 14, // 18: grpc.gcp.HandshakerResult.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 9, // 19: grpc.gcp.HandshakerResp.result:type_name -> grpc.gcp.HandshakerResult - 10, // 20: grpc.gcp.HandshakerResp.status:type_name -> grpc.gcp.HandshakerStatus - 5, // 21: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry.value:type_name -> grpc.gcp.ServerHandshakeParameters - 8, // 22: grpc.gcp.HandshakerService.DoHandshake:input_type -> grpc.gcp.HandshakerReq - 11, // 23: grpc.gcp.HandshakerService.DoHandshake:output_type -> grpc.gcp.HandshakerResp - 23, // [23:24] is the sub-list for method output_type - 22, // [22:23] is the sub-list for method input_type - 22, // [22:22] is the sub-list for extension type_name - 22, // [22:22] is the sub-list for extension extendee - 0, // [0:22] is the sub-list for field type_name -} - -func init() { file_grpc_gcp_handshaker_proto_init() } -func file_grpc_gcp_handshaker_proto_init() { - if File_grpc_gcp_handshaker_proto != nil { - return - } - file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Endpoint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Identity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartClientHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerHandshakeParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*StartServerHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NextHandshakeMessageReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Identity_ServiceAccount)(nil), - (*Identity_Hostname)(nil), - } - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*HandshakerReq_ClientStart)(nil), - (*HandshakerReq_ServerStart)(nil), - (*HandshakerReq_Next)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_gcp_handshaker_proto_rawDesc, - NumEnums: 2, - NumMessages: 12, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_gcp_handshaker_proto_goTypes, - DependencyIndexes: file_grpc_gcp_handshaker_proto_depIdxs, - EnumInfos: file_grpc_gcp_handshaker_proto_enumTypes, - MessageInfos: file_grpc_gcp_handshaker_proto_msgTypes, - }.Build() - File_grpc_gcp_handshaker_proto = out.File - file_grpc_gcp_handshaker_proto_rawDesc = nil - file_grpc_gcp_handshaker_proto_goTypes = nil - file_grpc_gcp_handshaker_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go deleted file mode 100644 index efdbd13fa3..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ /dev/null @@ -1,145 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package grpc_gcp - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// HandshakerServiceClient is the client API for HandshakerService service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type HandshakerServiceClient interface { - // Handshaker service accepts a stream of handshaker request, returning a - // stream of handshaker response. Client is expected to send exactly one - // message with either client_start or server_start followed by one or more - // messages with next. Each time client sends a request, the handshaker - // service expects to respond. Client does not have to wait for service's - // response before sending next request. - DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) -} - -type handshakerServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { - return &handshakerServiceClient{cc} -} - -func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) - if err != nil { - return nil, err - } - x := &handshakerServiceDoHandshakeClient{stream} - return x, nil -} - -type HandshakerService_DoHandshakeClient interface { - Send(*HandshakerReq) error - Recv() (*HandshakerResp, error) - grpc.ClientStream -} - -type handshakerServiceDoHandshakeClient struct { - grpc.ClientStream -} - -func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { - m := new(HandshakerResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HandshakerServiceServer is the server API for HandshakerService service. -// All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility -type HandshakerServiceServer interface { - // Handshaker service accepts a stream of handshaker request, returning a - // stream of handshaker response. Client is expected to send exactly one - // message with either client_start or server_start followed by one or more - // messages with next. Each time client sends a request, the handshaker - // service expects to respond. Client does not have to wait for service's - // response before sending next request. - DoHandshake(HandshakerService_DoHandshakeServer) error - mustEmbedUnimplementedHandshakerServiceServer() -} - -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} - -func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error { - return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") -} -func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} - -// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to HandshakerServiceServer will -// result in compilation errors. 
-type UnsafeHandshakerServiceServer interface { - mustEmbedUnimplementedHandshakerServiceServer() -} - -func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { - s.RegisterService(&HandshakerService_ServiceDesc, srv) -} - -func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) -} - -type HandshakerService_DoHandshakeServer interface { - Send(*HandshakerResp) error - Recv() (*HandshakerReq, error) - grpc.ServerStream -} - -type handshakerServiceDoHandshakeServer struct { - grpc.ServerStream -} - -func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { - m := new(HandshakerReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var HandshakerService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.gcp.HandshakerService", - HandlerType: (*HandshakerServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DoHandshake", - Handler: _HandshakerService_DoHandshake_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/gcp/handshaker.proto", -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go deleted file mode 100644 index 4fc3c79d6a..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/transport_security_common.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/gcp/transport_security_common.proto - -package grpc_gcp - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - -// The security level of the created channel. The list is sorted in increasing -// level of security. This order must always be maintained. -type SecurityLevel int32 - -const ( - SecurityLevel_SECURITY_NONE SecurityLevel = 0 - SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 - SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 -) - -// Enum value maps for SecurityLevel. -var ( - SecurityLevel_name = map[int32]string{ - 0: "SECURITY_NONE", - 1: "INTEGRITY_ONLY", - 2: "INTEGRITY_AND_PRIVACY", - } - SecurityLevel_value = map[string]int32{ - "SECURITY_NONE": 0, - "INTEGRITY_ONLY": 1, - "INTEGRITY_AND_PRIVACY": 2, - } -) - -func (x SecurityLevel) Enum() *SecurityLevel { - p := new(SecurityLevel) - *p = x - return p -} - -func (x SecurityLevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SecurityLevel) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_gcp_transport_security_common_proto_enumTypes[0].Descriptor() -} - -func (SecurityLevel) Type() protoreflect.EnumType { - return &file_grpc_gcp_transport_security_common_proto_enumTypes[0] -} - -func (x SecurityLevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SecurityLevel.Descriptor instead. -func (SecurityLevel) EnumDescriptor() ([]byte, []int) { - return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} -} - -// Max and min supported RPC protocol versions. -type RpcProtocolVersions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Maximum supported RPC version. - MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` - // Minimum supported RPC version. - MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` -} - -func (x *RpcProtocolVersions) Reset() { - *x = RpcProtocolVersions{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RpcProtocolVersions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RpcProtocolVersions) ProtoMessage() {} - -func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RpcProtocolVersions.ProtoReflect.Descriptor instead. -func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { - return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} -} - -func (x *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { - if x != nil { - return x.MaxRpcVersion - } - return nil -} - -func (x *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { - if x != nil { - return x.MinRpcVersion - } - return nil -} - -// RPC version contains a major version and a minor version. 
-type RpcProtocolVersions_Version struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` -} - -func (x *RpcProtocolVersions_Version) Reset() { - *x = RpcProtocolVersions_Version{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RpcProtocolVersions_Version) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RpcProtocolVersions_Version) ProtoMessage() {} - -func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RpcProtocolVersions_Version.ProtoReflect.Descriptor instead. -func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { - return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *RpcProtocolVersions_Version) GetMajor() uint32 { - if x != nil { - return x.Major - } - return 0 -} - -func (x *RpcProtocolVersions_Version) GetMinor() uint32 { - if x != nil { - return x.Minor - } - return 0 -} - -var File_grpc_gcp_transport_security_common_proto protoreflect.FileDescriptor - -var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, - 0x78, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, - 0x69, 0x6e, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, - 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, - 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, - 0x72, 0x2a, 0x51, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 
0x79, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x49, - 0x54, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, - 0x45, 0x47, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x49, 0x56, 0x41, - 0x43, 0x59, 0x10, 0x02, 0x42, 0x78, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x1c, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_gcp_transport_security_common_proto_rawDescOnce sync.Once - file_grpc_gcp_transport_security_common_proto_rawDescData = file_grpc_gcp_transport_security_common_proto_rawDesc -) - -func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { - file_grpc_gcp_transport_security_common_proto_rawDescOnce.Do(func() { - file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_transport_security_common_proto_rawDescData) - }) - return file_grpc_gcp_transport_security_common_proto_rawDescData -} - -var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ - (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel - (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions - (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version -} -var file_grpc_gcp_transport_security_common_proto_depIdxs = []int32{ - 2, // 0: grpc.gcp.RpcProtocolVersions.max_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version - 2, // 1: grpc.gcp.RpcProtocolVersions.min_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_grpc_gcp_transport_security_common_proto_init() } -func file_grpc_gcp_transport_security_common_proto_init() { - if File_grpc_gcp_transport_security_common_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RpcProtocolVersions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RpcProtocolVersions_Version); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_gcp_transport_security_common_proto_rawDesc, - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_gcp_transport_security_common_proto_goTypes, - DependencyIndexes: file_grpc_gcp_transport_security_common_proto_depIdxs, - EnumInfos: file_grpc_gcp_transport_security_common_proto_enumTypes, - MessageInfos: file_grpc_gcp_transport_security_common_proto_msgTypes, - }.Build() - File_grpc_gcp_transport_security_common_proto = out.File - file_grpc_gcp_transport_security_common_proto_rawDesc = nil - file_grpc_gcp_transport_security_common_proto_goTypes = nil - file_grpc_gcp_transport_security_common_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go deleted file mode 100644 index 9a300bc19a..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/utils.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package alts - -import ( - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "regexp" - "runtime" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" -) - -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" -) - -type platformError string - -func (k platformError) Error() string { - return fmt.Sprintf("%s is not supported", string(k)) -} - -var ( - // The following two variables will be reassigned in tests. - runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, platformError(runningOS) - } - } - vmOnGCP bool -) - -// isRunningOnGCP checks whether the local system, without doing a network request is -// running on GCP. 
-func isRunningOnGCP() bool { - manufacturer, err := readManufacturer() - if os.IsNotExist(err) { - return false - } - if err != nil { - log.Fatalf("failure to read manufacturer information: %v", err) - } - name := string(manufacturer) - switch runningOS { - case "linux": - name = strings.TrimSpace(name) - return name == "Google" || name == "Google Compute Engine" - case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) - return name == "Google" - default: - log.Fatal(platformError(runningOS)) - } - return false -} - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} - -// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, -// if it exists. This API should be used by gRPC server RPC handlers to get -// information about the communicating peer. For client-side, use grpc.Peer() -// CallOption. -func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { - p, ok := peer.FromContext(ctx) - if !ok { - return nil, errors.New("no Peer found in Context") - } - return AuthInfoFromPeer(p) -} - -// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it -// exists. This API should be used by gRPC clients after obtaining a peer object -// using the grpc.Peer() CallOption. -func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { - altsAuthInfo, ok := p.AuthInfo.(AuthInfo) - if !ok { - return nil, errors.New("no alts.AuthInfo found in Peer") - } - return altsAuthInfo, nil -} - -// ClientAuthorizationCheck checks whether the client is authorized to access -// the requested resources based on the given expected client service accounts. -// This API should be used by gRPC server RPC handlers. This API should not be -// used by clients. -func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error { - authInfo, err := AuthInfoFromContext(ctx) - if err != nil { - return status.Errorf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err) - } - peer := authInfo.PeerServiceAccount() - for _, sa := range expectedServiceAccounts { - if strings.EqualFold(peer, sa) { - return nil - } - } - return status.Errorf(codes.PermissionDenied, "Client %v is not authorized", peer) -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index e69562e787..7eee7e4ec1 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -30,7 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -188,15 +188,12 @@ type RequestInfo struct { AuthInfo AuthInfo } -// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. -type requestInfoKey struct{} - // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. 
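The ClientAuthorizationCheck helper in the removed utils.go above is the server-side entry point for ALTS peer authorization. A minimal sketch of how a server could wrap it in a unary interceptor; the package name, function name, and allowed-account list are illustrative, not part of this patch:

package authz

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

// altsAuthz returns a unary server interceptor that rejects RPCs whose ALTS
// peer service account is not in the allowed list, delegating the check to
// alts.ClientAuthorizationCheck shown above.
func altsAuthz(allowed []string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if err := alts.ClientAuthorizationCheck(ctx, allowed); err != nil {
			// The returned status already carries codes.PermissionDenied.
			return nil, err
		}
		return handler(ctx, req)
	}
}

Such an interceptor could be installed with grpc.ChainUnaryInterceptor when constructing the server.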
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) - return + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes @@ -211,16 +208,12 @@ type ClientHandshakeInfo struct { Attributes *attributes.Attributes } -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. -type clientHandshakeInfoKey struct{} - // ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored // in ctx. // // This API is experimental. func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) return chi } @@ -249,15 +242,6 @@ func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { return nil } -func init() { - internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) - } - internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) - } -} - // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go deleted file mode 100644 index 7f3e240e47..0000000000 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package google defines credentials for google cloud services. -package google - -import ( - "context" - "fmt" - "time" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/alts" - "google.golang.org/grpc/credentials/oauth" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" -) - -const tokenRequestTimeout = 30 * time.Second - -var logger = grpclog.Component("credentials") - -// NewDefaultCredentials returns a credentials bundle that is configured to work -// with google services. -// -// This API is experimental. 
-func NewDefaultCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() - perRPCCreds, err := oauth.NewApplicationDefault(ctx) - if err != nil { - logger.Warningf("google default creds: failed to create application oauth: %v", err) - } - return perRPCCreds - }, - } - bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) - if err != nil { - logger.Warningf("google default creds: failed to create new creds: %v", err) - } - return bundle -} - -// NewComputeEngineCredentials returns a credentials bundle that is configured to work -// with google services. This API must only be used when running on GCE. Authentication configured -// by this API represents the GCE VM's default service account. -// -// This API is experimental. -func NewComputeEngineCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - return oauth.NewComputeEngine() - }, - } - bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) - if err != nil { - logger.Warningf("compute engine creds: failed to create new creds: %v", err) - } - return bundle -} - -// creds implements credentials.Bundle. -type creds struct { - // Supported modes are defined in internal/internal.go. - mode string - // The transport credentials associated with this bundle. - transportCreds credentials.TransportCredentials - // The per RPC credentials associated with this bundle. - perRPCCreds credentials.PerRPCCredentials - // Creates new per RPC credentials - newPerRPCCreds func() credentials.PerRPCCredentials -} - -func (c *creds) TransportCredentials() credentials.TransportCredentials { - return c.transportCreds -} - -func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { - if c == nil { - return nil - } - return c.perRPCCreds -} - -// NewWithMode should make a copy of Bundle, and switch mode. Modifying the -// existing Bundle may cause races. -func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { - newCreds := &creds{ - mode: mode, - newPerRPCCreds: c.newPerRPCCreds, - } - - // Create transport credentials. - switch mode { - case internal.CredsBundleModeFallback: - newCreds.transportCreds = credentials.NewTLS(nil) - case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: - // Only the clients can use google default credentials, so we only need - // to create new ALTS client creds here. - newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions()) - default: - return nil, fmt.Errorf("unsupported mode: %v", mode) - } - - if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { - newCreds.perRPCCreds = newCreds.newPerRPCCreds() - } - - return newCreds, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go deleted file mode 100644 index 852ae375cf..0000000000 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ /dev/null @@ -1,225 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package oauth implements gRPC credentials using OAuth. -package oauth - -import ( - "context" - "fmt" - "io/ioutil" - "sync" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/grpc/credentials" -) - -// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. -type TokenSource struct { - oauth2.TokenSource -} - -// GetRequestMetadata gets the request metadata as a map from a TokenSource. -func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - token, err := ts.Token() - if err != nil { - return nil, err - } - ri, _ := credentials.RequestInfoFromContext(ctx) - if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": token.Type() + " " + token.AccessToken, - }, nil -} - -// RequireTransportSecurity indicates whether the credentials requires transport security. -func (ts TokenSource) RequireTransportSecurity() bool { - return true -} - -type jwtAccess struct { - jsonKey []byte -} - -// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. -func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewJWTAccessFromKey(jsonKey) -} - -// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. -func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { - return jwtAccess{jsonKey}, nil -} - -func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with - // uri as the key, to avoid recreating for every RPC. - ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) - if err != nil { - return nil, err - } - token, err := ts.Token() - if err != nil { - return nil, err - } - ri, _ := credentials.RequestInfoFromContext(ctx) - if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": token.Type() + " " + token.AccessToken, - }, nil -} - -func (j jwtAccess) RequireTransportSecurity() bool { - return true -} - -// oauthAccess supplies PerRPCCredentials from a given token. -type oauthAccess struct { - token oauth2.Token -} - -// NewOauthAccess constructs the PerRPCCredentials using a given token. 
-func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { - return oauthAccess{token: *token} -} - -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - ri, _ := credentials.RequestInfoFromContext(ctx) - if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": oa.token.Type() + " " + oa.token.AccessToken, - }, nil -} - -func (oa oauthAccess) RequireTransportSecurity() bool { - return true -} - -// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from -// Google Compute Engine (GCE)'s metadata server. It is only valid to use this -// if your program is running on a GCE instance. -// TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() credentials.PerRPCCredentials { - return TokenSource{google.ComputeTokenSource("")} -} - -// serviceAccount represents PerRPCCredentials via JWT signing key. -type serviceAccount struct { - mu sync.Mutex - config *jwt.Config - t *oauth2.Token -} - -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - s.mu.Lock() - defer s.mu.Unlock() - if !s.t.Valid() { - var err error - s.t, err = s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err - } - } - ri, _ := credentials.RequestInfoFromContext(ctx) - if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": s.t.Type() + " " + s.t.AccessToken, - }, nil -} - -func (s *serviceAccount) RequireTransportSecurity() bool { - return true -} - -// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice -// from a Google Developers service account. -func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { - config, err := google.JWTConfigFromJSON(jsonKey, scope...) - if err != nil { - return nil, err - } - return &serviceAccount{config: config}, nil -} - -// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file -// of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewServiceAccountFromKey(jsonKey, scope...) -} - -// NewApplicationDefault returns "Application Default Credentials". For more -// detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { - creds, err := google.FindDefaultCredentials(ctx, scope...) - if err != nil { - return nil, err - } - - // If JSON is nil, the authentication is provided by the environment and not - // with a credentials file, e.g. when code is running on Google Cloud - // Platform. Use the returned token source. - if creds.JSON == nil { - return TokenSource{creds.TokenSource}, nil - } - - // If auth is provided by env variable or creds file, the behavior will be - // different based on whether scope is set. 
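The oauth package removed from the vendor tree above exposes per-RPC credentials such as NewServiceAccountFromFile and TokenSource. A minimal dial sketch, assuming a placeholder key path, scope, and target address; because these credentials require transport security, they are paired with TLS transport credentials:

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Key path and scope are illustrative placeholders.
	perRPC, err := oauth.NewServiceAccountFromFile("/path/to/key.json", "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("creating per-RPC credentials: %v", err)
	}
	// RequireTransportSecurity returns true for these credentials, so a
	// secure transport must be configured on the connection.
	conn, err := grpc.Dial("example.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
		grpc.WithPerRPCCredentials(perRPC),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}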
Because the returned - // creds.TokenSource does oauth with jwt by default, and it requires scope. - // We can only use it if scope is not empty, otherwise it will fail with - // missing scope error. - // - // If scope is set, use it, it should just work. - // - // If scope is not set, we try to use jwt directly without oauth (this only - // works if it's a service account). - - if len(scope) != 0 { - return TokenSource{creds.TokenSource}, nil - } - - // Try to convert JSON to a jwt config without setting the optional scope - // parameter to check if it's a service account (the function errors if it's - // not). This is necessary because the returned config doesn't show the type - // of the account. - if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil { - // If this fails, it's not a service account, return the original - // TokenSource from above. - return TokenSource{creds.TokenSource}, nil - } - - // If it's a service account, create a JWT only access with the key. - return NewJWTAccessFromKey(creds.JSON) -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e7f86e6d7c..7a497237bb 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -66,11 +66,7 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string - // This is used by ccResolverWrapper to backoff between successive calls to - // resolver.ResolveNow(). The user will have no need to configure this, but - // we need to be able to configure this in tests. - resolveNowBackoff func(int) time.Duration - resolvers []resolver.Builder + resolvers []resolver.Builder } // DialOption configures how we set up the connection. @@ -596,7 +592,6 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, - resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, } } @@ -611,16 +606,6 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } -// withResolveNowBackoff specifies the function that clientconn uses to backoff -// between successive calls to resolver.ResolveNow(). -// -// For testing purpose only. -func withResolveNowBackoff(f func(int) time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolveNowBackoff = f - }) -} - // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. 
They will be matched against the scheme used for the diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index cab74e5577..b177cfa66d 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -4,7 +4,7 @@ go 1.11 require ( github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.2 github.com/google/go-cmp v0.5.0 diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 77ee70b443..24d2976abb 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,10 +1,8 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -12,8 +10,8 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -43,7 +41,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 0000000000..32c9b59033 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1e2834c70f..1b596bf357 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,12 +38,6 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. - NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // NewClientHandshakeInfoContext returns a copy of the input context with - // the passed in ClientHandshakeInfo struct added to it. - NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult @@ -65,6 +59,11 @@ var ( // gRPC server. 
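The new internal/credentials package added above stores RequestInfo and ClientHandshakeInfo under unexported empty-struct context keys so that other packages cannot collide with them. A small sketch of that same pattern, using a hypothetical request-ID value instead of the gRPC types:

package ctxkeys

import "context"

// requestIDKey mirrors the unexported key-struct pattern used by the
// internal/credentials package above: an empty struct type makes the key
// unique to this package.
type requestIDKey struct{}

// NewRequestIDContext returns a copy of ctx carrying id.
func NewRequestIDContext(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, requestIDKey{}, id)
}

// RequestIDFromContext extracts the id, reporting whether it was present.
func RequestIDFromContext(ctx context.Context) (string, bool) {
	id, ok := ctx.Value(requestIDKey{}).(string)
	return id, ok
}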
An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index e699004005..5e7f36703d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -24,6 +24,7 @@ import ( "sync" "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -51,6 +52,74 @@ type RPCConfig struct { Context context.Context MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. 
On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is unimplementable; do not use. +type ServerInterceptor interface { + notDefined() } type csKeyType string diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 3042355665..03825bbe7b 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -34,6 +34,7 @@ import ( grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" @@ -46,6 +47,13 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + func init() { resolver.Register(NewBuilder()) } @@ -143,7 +151,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() - d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -201,28 +208,38 @@ func (d *dnsResolver) Close() { func (d *dnsResolver) watcher() { defer d.wg.Done() + backoffIndex := 1 for { - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - state, err := d.lookup() if err != nil { + // Report error to the underlying grpc.ClientConn. d.cc.ReportError(err) } else { - d.cc.UpdateState(*state) + err = d.cc.UpdateState(*state) } - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. 
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } select { - case <-t.C: case <-d.ctx.Done(): - t.Stop() + timer.Stop() return + case <-timer.C: } } } diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index bd4b8875f1..c0634d152c 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -46,6 +46,22 @@ type BalancerConfig struct { type intermediateBalancerConfig []map[string]json.RawMessage +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + // UnmarshalJSON implements the json.Unmarshaler interface. // // ServiceConfig contains a list of loadBalancingConfigs, each with a name and diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 40ef23923f..f63a013762 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -20,13 +20,17 @@ package transport import ( "bytes" + "errors" "fmt" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -128,6 +132,14 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + streamID uint32 + contentSubtype string + status *status.Status +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 endStream bool @@ -749,6 +761,24 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return nil } +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: "200"}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + return nil +} + func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true @@ -787,6 +817,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 
8902b7f90d..48c5e52eda 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -32,15 +32,14 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpcutil" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/internal/transport/networktype" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -116,6 +115,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And @@ -238,8 +240,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Attributes field of resolver.Address, which is shoved into connectCtx // and passed to the credential handshaker. This makes it possible for // address specific arbitrary data to reach the credential handshaker. - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) @@ -347,12 +348,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + t.Close(err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + t.Close(err) + return nil, err } var ss []http2.Setting @@ -370,14 +373,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } err = t.framer.fr.WriteSettings(ss...) 
if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + t.Close(err) + return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + t.Close(err) + return nil, err } } @@ -414,6 +419,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -453,7 +459,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -832,6 +838,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } } // Close kicks off the shutdown process of the transport. This should be called @@ -841,12 +850,12 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This method blocks until the addrConn that initiated this transport is // re-connected. This happens because t.onClose() begins reconnect logic at the // addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only Close once. if t.state == closing { t.mu.Unlock() - return nil + return } // Call t.onClose before setting the state to closing to prevent the client // from attempting to create new streams ASAP. @@ -862,13 +871,19 @@ func (t *http2Client) Close() error { t.mu.Unlock() t.controlBuf.finish() t.cancel() - err := t.conn.Close() + t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + if len(goAwayDebugMessage) > 0 { + err = fmt.Errorf("closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + } // Notify all active streams. 
for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -876,7 +891,6 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -895,7 +909,7 @@ func (t *http2Client) GracefulClose() { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(ErrConnClosing) return } t.controlBuf.put(&incomingGoAway{}) @@ -1141,9 +1155,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { } } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1161,7 +1175,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: @@ -1191,7 +1205,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) } } @@ -1207,12 +1221,13 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -1309,7 +1324,8 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return + err = connectionErrorf(true, err, "error reading server preface: %v", err) + t.Close(err) // this kicks off resetTransport, so must be last before return return } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) @@ -1318,7 +1334,8 @@ func (t *http2Client) reader() { } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return + // this kicks off resetTransport, so must be last before return + t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) return } t.onPrefaceReceipt() @@ -1354,7 +1371,7 @@ func (t *http2Client) reader() { continue } else { // Transport error. 
- t.Close() + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) return } } @@ -1413,7 +1430,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) return } t.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0cf1cc320b..11be5599cd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -26,6 +26,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "sync" "sync/atomic" @@ -355,26 +356,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if state.data.statsTrace != nil { s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.data.method, - } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - } - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return false - } - } t.mu.Lock() if t.state != reachable { t.mu.Unlock() @@ -402,6 +383,39 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID + if state.data.httpMethod != http.MethodPost { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + } + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + s.cancel() + return false + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: state.data.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + }) + return false + } + } t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 7e41d1183f..c7dee140cf 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -111,6 +111,7 @@ type parsedHeaderData struct { timeoutSet bool timeout time.Duration method string + httpMethod string // key-value metadata map from the peer. 
mdata map[string][]string statsTags []byte @@ -363,6 +364,8 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } d.data.statsTrace = v d.addMetadata(f.Name, string(v)) + case ":method": + d.data.httpMethod = f.Value default: if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { break diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 9c8f79cb4b..6cc1031fd9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -241,6 +241,7 @@ type Stream struct { ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string @@ -611,6 +612,8 @@ type CallHdr struct { ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished } // ClientTransport is the common interface for all gRPC client-side transport @@ -619,7 +622,7 @@ type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are @@ -653,8 +656,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 0000000000..3677c3f04f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. 
+func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index cf6d1b9478..e4cbea9174 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -75,13 +75,9 @@ func Pairs(kv ...string) MD { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -195,12 +191,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + out := raw.md.Copy() + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } } - return Join(mds...), ok + return out, ok } type rawMD struct { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 56e33f6c76..b858c2a5e6 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -84,7 +84,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) b.sc.Connect() } else { - b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) b.sc.Connect() } return nil diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index fc6725b89f..dfd3226a1d 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -48,11 +48,6 @@ mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto -# Pull in the MeshCA service proto. 
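The metadata changes above rewrite Pairs and FromOutgoingContext to walk key/value arguments in steps of two, lower-casing keys and merging values appended to the outgoing context. A short usage example against the public metadata API:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Pairs requires an even number of arguments; keys are lower-cased, so
	// both values end up under "x-trace-id".
	md := metadata.Pairs("X-Trace-ID", "abc123", "x-trace-id", "def456")

	ctx := metadata.NewOutgoingContext(context.Background(), md)
	ctx = metadata.AppendToOutgoingContext(ctx, "authorization", "Bearer token")

	out, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(out["x-trace-id"])    // [abc123 def456]
	fmt.Println(out["authorization"]) // [Bearer token]
}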
-mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 -echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" -curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto - mkdir -p ${WORKDIR}/out # Generates sources without the embed requirement @@ -76,7 +71,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto - ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto ) # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an @@ -122,8 +116,4 @@ mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_s mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ -# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ -mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e9fa8e33d9..6a9d234a59 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -181,7 +181,7 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index f2d81968f9..4118de571a 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -22,7 +22,6 @@ import ( "fmt" "strings" "sync" - "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" @@ -40,9 +39,6 @@ type ccResolverWrapper struct { resolver resolver.Resolver done *grpcsync.Event curState resolver.State - - pollingMu sync.Mutex - polling chan struct{} } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -93,59 +89,19 @@ func (ccr *ccResolverWrapper) close() { ccr.resolverMu.Unlock() } -// poll begins or ends asynchronous polling of the resolver based on whether -// err is ErrBadResolverState. -func (ccr *ccResolverWrapper) poll(err error) { - ccr.pollingMu.Lock() - defer ccr.pollingMu.Unlock() - if err != balancer.ErrBadResolverState { - // stop polling - if ccr.polling != nil { - close(ccr.polling) - ccr.polling = nil - } - return - } - if ccr.polling != nil { - // already polling - return - } - p := make(chan struct{}) - ccr.polling = p - go func() { - for i := 0; ; i++ { - ccr.resolveNow(resolver.ResolveNowOptions{}) - t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) - select { - case <-p: - t.Stop() - return - case <-ccr.done.Done(): - // Resolver has been closed. 
- t.Stop() - return - case <-t.C: - select { - case <-p: - return - default: - } - // Timer expired; re-resolve. - } - } - }() -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { - return + return nil } channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } ccr.curState = s - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil } func (ccr *ccResolverWrapper) ReportError(err error) { @@ -153,7 +109,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { return } channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) + ccr.cc.updateResolverState(resolver.State{}, err) } // NewAddress is called by the resolver implementation to send addresses to gRPC. @@ -166,7 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } ccr.curState.Addresses = addrs - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } // NewServiceConfig is called by the resolver implementation to send service @@ -183,14 +139,13 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { scpr := parseServiceConfig(sc) if scpr.Err != nil { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - ccr.poll(balancer.ErrBadResolverState) return } if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) } ccr.curState.ServiceConfig = scpr - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index c0a1208f2f..6db356fa56 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -429,9 +429,10 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -853,7 +854,17 @@ func toRPCErr(err error) error { // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. 
+ if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 7a2aa28a11..0a151dee4f 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -57,12 +57,22 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } } var statusOK = status.New(codes.OK, "") @@ -107,9 +117,12 @@ type serverWorkerData struct { type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop @@ -266,6 +279,35 @@ func CustomCodec(codec Codec) ServerOption { }) } +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + // RPCCompressor returns a ServerOption that sets a compressor for outbound // messages. For backward compatibility, all outbound messages will be sent // using this compressor, regardless of incoming message compression. 
By @@ -376,6 +418,11 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { @@ -519,7 +566,7 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[string]map[transport.ServerTransport]bool), services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), @@ -778,7 +825,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -786,7 +833,7 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return @@ -814,15 +861,24 @@ func (s *Server) handleRawConn(rawConn net.Conn) { } rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(lisAddr, st) { return } go func() { s.serveStreams(st) - s.removeConn(st) + s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { @@ -924,10 +980,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) + defer s.removeConn(listenerAddressForServeHTTP, st) s.serveStreams(st) } @@ -955,7 +1011,7 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { @@ -967,15 +1023,28 @@ func (s *Server) addConn(st transport.ServerTransport) bool { // immediately. st.Drain() } - s.conns[st] = true + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. 
This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } s.cv.Broadcast() } } @@ -1639,7 +1708,7 @@ func (s *Server) Stop() { s.mu.Lock() listeners := s.lis s.lis = nil - st := s.conns + conns := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() @@ -1648,8 +1717,10 @@ func (s *Server) Stop() { for lis := range listeners { lis.Close() } - for c := range st { - c.Close() + for _, cs := range conns { + for st := range cs { + st.Close() + } } if s.opts.numServerWorkers > 0 { s.stopServerWorkers() @@ -1686,8 +1757,10 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for st := range s.conns { - st.Drain() + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } } s.drain = true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index eda1248d60..1f3e70d2c4 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -52,14 +52,20 @@ import ( // of the RPC. type StreamHandler func(srv interface{}, stream ServerStream) error -// StreamDesc represents a streaming RPC service's method specification. +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. @@ -166,7 +172,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. if err := cc.waitForResolvedAddrs(ctx); err != nil { @@ -175,18 +180,40 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method}) + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) 
+ } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { - return nil, status.Convert(err).Err() + return nil, toRPCErr(err) } + if rpcConfig != nil { if rpcConfig.Context != nil { ctx = rpcConfig.Context } mc = rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } } + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -223,6 +250,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, } // Set our outgoing compression according to the UseCompressor CallOption, if diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index caea1ebed6..dbf34e6bb5 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -37,16 +37,16 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 51024d6b31..bfe5cf8870 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.36.0" +const Version = "1.38.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index b41df6dc86..1a0dbd7ee5 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -28,7 +28,8 @@ cleanup() { } trap cleanup EXIT -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version if [[ "$1" = "-install" ]]; then # Check for module support @@ -104,12 +105,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. # TODO: Remove when we drop Go 1.10 support go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 2>&1 | not grep -vE "\.pb\.go:" -go vet -all ./... - misspell -error . # - Check that generated proto files are up to date. @@ -119,12 +114,22 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our modules are tidy. -if go help mod >& /dev/null; then - find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + + go mod tidy git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) -fi + popd +done # - Collection of static analysis checks # @@ -141,8 +146,11 @@ not grep -Fv '.CredsBundle .NewAddress .NewServiceConfig .Type is deprecated: use Attributes +BuildVersion is deprecated balancer.ErrTransientFailure balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated grpc.CallCustomCodec grpc.Code grpc.Compressor @@ -164,13 +172,7 @@ grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion -resolver.Backend -resolver.GRPCLB -extDesc.Filename is deprecated -BuildVersion is deprecated -github.com/golang/protobuf/jsonpb is deprecated proto is deprecated -xxx_messageInfo_ proto.InternalMessageInfo is deprecated proto.EnumName is deprecated proto.ErrInternalBadWireType is deprecated @@ -184,7 +186,12 @@ proto.RegisterExtension is deprecated proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated' "${SC_OUT}" +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" # - special golint on package comments. lint_package_comment_per_package() { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 9bf4e8c176..07da5db345 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -24,6 +24,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). 
func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -48,10 +49,11 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using -// options in UnmarshalOptions object. It will clear the message first before -// setting the fields. If it returns an error, the given message may be -// partially set. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -124,15 +126,6 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { return d.unexpectedTokenError(tok) } - if err := d.unmarshalFields(m, skipTypeURL); err != nil { - return err - } - - return nil -} - -// unmarshalFields unmarshals the fields into the given protoreflect.Message. -func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -170,7 +163,7 @@ func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { // Only extension names are in [name] format. extName := pref.FullName(name[1 : len(name)-1]) - extType, err := d.findExtension(extName) + extType, err := d.opts.Resolver.FindExtensionByName(extName) if err != nil && err != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) } @@ -184,17 +177,7 @@ func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { // The name can either be the JSON name or the proto field name. fd = fieldDescs.ByJSONName(name) if fd == nil { - fd = fieldDescs.ByName(pref.Name(name)) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textual field name is the group message name. - gd := fieldDescs.ByName(pref.Name(strings.ToLower(name))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == pref.Name(name) { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != pref.Name(name) { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(name) } } if flags.ProtoLegacy { @@ -257,15 +240,6 @@ func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { } } -// findExtension returns protoreflect.ExtensionType from the resolver if found. 
-func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - func isKnownValue(fd pref.FieldDescriptor) bool { md := fd.Message() return md != nil && md.FullName() == genid.Value_message_fullname diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 7d61933008..ba971f0781 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -7,15 +7,17 @@ package protojson import ( "encoding/base64" "fmt" - "sort" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -131,7 +133,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { } enc := encoder{internalEnc, o} - if err := enc.marshalMessage(m.ProtoReflect()); err != nil { + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { return nil, err } if o.AllowPartial { @@ -145,76 +147,94 @@ type encoder struct { opts MarshalOptions } -// marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message) error { - if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { - return marshal(e, m) - } +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} - e.StartObject() - defer e.EndObject() - if err := e.marshalFields(m); err != nil { - return err +func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { + return } + m.FieldRanger.Range(f) +} - return nil +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. 
+type unpopulatedFieldRanger struct{ pref.Message } + +func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + v = pref.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) } -// marshalFields marshals the fields in the given protoreflect.Message. -func (e encoder) marshalFields(m pref.Message) error { - messageDesc := m.Descriptor() - if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m pref.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { return errors.New("no support for proto1 MessageSets") } - // Marshal out known fields. - fieldDescs := messageDesc.Fields() - for i := 0; i < fieldDescs.Len(); { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue // unpopulated oneofs are not affected by EmitUnpopulated - } - } else { - i++ - } + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } - val := m.Get(fd) - if !m.Has(fd) { - if !e.opts.EmitUnpopulated { - continue - } - isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { - // Use invalid value to emit null. - val = pref.Value{} - } - } + e.StartObject() + defer e.EndObject() + var fields order.FieldRanger = m + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { name := fd.JSONName() if e.opts.UseProtoNames { - name = string(fd.Name()) - // Use type name for group field name. - if fd.Kind() == pref.GroupKind { - name = string(fd.Message().Name()) - } + name = fd.TextName() } - if err := e.WriteName(name); err != nil { - return err + + if err = e.WriteName(name); err != nil { + return false } - if err := e.marshalValue(val, fd); err != nil { - return err + if err = e.marshalValue(v, fd); err != nil { + return false } - } - - // Marshal out extensions. - if err := e.marshalExtensions(m); err != nil { - return err - } - return nil + return true + }) + return err } // marshalValue marshals the given protoreflect.Value. 
@@ -281,7 +301,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } case pref.MessageKind, pref.GroupKind: - if err := e.marshalMessage(val.Message()); err != nil { + if err := e.marshalMessage(val.Message(), ""); err != nil { return err } @@ -305,98 +325,20 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { return nil } -type mapEntry struct { - key pref.MapKey - value pref.Value -} - // marshalMap marshals given protoreflect.Map. func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { e.StartObject() defer e.EndObject() - // Get a sorted list based on keyType first. - entries := make([]mapEntry, 0, mmap.Len()) - mmap.Range(func(key pref.MapKey, val pref.Value) bool { - entries = append(entries, mapEntry{key: key, value: val}) - return true - }) - sortMap(fd.MapKey().Kind(), entries) - - // Write out sorted list. - for _, entry := range entries { - if err := e.WriteName(entry.key.String()); err != nil { - return err - } - if err := e.marshalSingular(entry.value, fd.MapValue()); err != nil { - return err - } - } - return nil -} - -// sortMap orders list based on value of key field for deterministic ordering. -func sortMap(keyKind pref.Kind, values []mapEntry) { - sort.Slice(values, func(i, j int) bool { - switch keyKind { - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, - pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return values[i].key.Int() < values[j].key.Int() - - case pref.Uint32Kind, pref.Fixed32Kind, - pref.Uint64Kind, pref.Fixed64Kind: - return values[i].key.Uint() < values[j].key.Uint() + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false } - return values[i].key.String() < values[j].key.String() - }) -} - -// marshalExtensions marshals extension fields. -func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false } - - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - - // Use [name] format for JSON field name. - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) return true }) - - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. - for _, entry := range entries { - // JSON field name is the proto field name enclosed in [], similar to - // textproto. This is consistent with Go v1 lib. C++ lib v3.7.0 does not - // marshal out extension fields. 
- if err := e.WriteName("[" + entry.key + "]"); err != nil { - return err - } - if err := e.marshalValue(entry.value, entry.desc); err != nil { - return err - } - } - return nil + return err } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index def7377c78..72924a9050 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -7,6 +7,7 @@ package protojson import ( "bytes" "fmt" + "math" "strconv" "strings" "time" @@ -106,13 +107,11 @@ func (e encoder) marshalAny(m pref.Message) error { fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) - // Start writing the JSON object. - e.StartObject() - defer e.EndObject() - if !m.Has(fdType) { if !m.Has(fdValue) { // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() return nil } else { // Return error if type_url field is not set, but value is set. @@ -123,14 +122,8 @@ func (e encoder) marshalAny(m pref.Message) error { typeVal := m.Get(fdType) valueVal := m.Get(fdValue) - // Marshal out @type field. - typeURL := typeVal.String() - e.WriteName("@type") - if err := e.WriteString(typeURL); err != nil { - return err - } - // Resolve the type in order to unmarshal value field. + typeURL := typeVal.String() emt, err := e.opts.Resolver.FindMessageByURL(typeURL) if err != nil { return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) @@ -149,12 +142,21 @@ func (e encoder) marshalAny(m pref.Message) error { // with corresponding custom JSON encoding of the embedded message as a // field. if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + e.WriteName("value") return marshal(e, em) } // Else, marshal out the embedded message's fields in this Any object. - if err := e.marshalFields(em); err != nil { + if err := e.marshalMessage(em, typeURL); err != nil { return err } @@ -494,6 +496,11 @@ func (e encoder) marshalKnownValue(m pref.Message) error { if fd == nil { return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } return e.marshalSingular(m.Get(fd), fd) } @@ -604,14 +611,11 @@ func (e encoder) marshalDuration(m pref.Message) error { } // Generated output always contains 0, 3, 6, or 9 fractional digits, // depending on required precision, followed by the suffix "s". 
- f := "%d.%09d" - if nanos < 0 { - nanos = -nanos - if secs == 0 { - f = "-%d.%09d" - } + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos } - x := fmt.Sprintf(f, secs, nanos) + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, ".000") diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index cab95a4273..8fb1d9e086 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "strings" "unicode/utf8" "google.golang.org/protobuf/internal/encoding/messageset" @@ -23,6 +22,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -51,8 +51,9 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using options in -// UnmarshalOptions object. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -158,21 +159,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch tok.NameKind() { case text.IdentName: name = pref.Name(tok.IdentName()) - fd = fieldDescs.ByName(name) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textproto field name is the group message name. - gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. - xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true @@ -269,15 +260,6 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return nil } -// findExtension returns protoreflect.ExtensionType from the Resolver if found. -func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. 
func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 0877d71c51..8d5304dc5b 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "sort" "strconv" "unicode/utf8" @@ -16,10 +15,11 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -169,35 +169,15 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { // If unable to expand, continue on to marshal Any as a regular message. } - // Marshal known fields. - fieldDescs := messageDesc.Fields() - size := fieldDescs.Len() - for i := 0; i < size; { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - - if fd == nil || !m.Has(fd) { - continue - } - - name := fd.Name() - // Use type name for group field name. - if fd.Kind() == pref.GroupKind { - name = fd.Message().Name() - } - val := m.Get(fd) - if err := e.marshalField(string(name), val, fd); err != nil { - return err + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false } - } - - // Marshal extensions. - if err := e.marshalExtensions(m); err != nil { + return true + }) + if err != nil { return err } @@ -290,7 +270,7 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto // marshalMap marshals the given protoreflect.Map as multiple name-value fields. func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { var err error - mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -311,48 +291,6 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) return err } -// marshalExtensions marshals extension fields. -func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true - } - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) - return true - }) - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. 
- for _, entry := range entries { - // Extension field name is the proto field name enclosed in []. - name := "[" + entry.key + "]" - if err := e.marshalField(name, entry.value, entry.desc); err != nil { - return err - } - } - return nil -} - // marshalUnknown parses the given []byte and marshals fields out. // This function assumes proper encoding in the given []byte. func (e encoder) marshalUnknown(b []byte) { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index e7af0fe0de..360c63329d 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -42,6 +42,8 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { name = "FileImports" case pref.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() } start, end = name+"{", "}" } diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go index a904dd1f91..49c8676d48 100644 --- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -26,6 +26,14 @@ func Bool() bool { return randSeed%2 == 1 } +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + // randSeed is a best-effort at an approximate hash of the Go binary. var randSeed = binaryHash() diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go index 2eb7023b2f..50578d6593 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -94,7 +94,7 @@ func (t Token) Pos() int { return t.pos } -// Name returns the object name if token is Name, else it will return an error. +// Name returns the object name if token is Name, else it panics. func (t Token) Name() string { if t.kind == Name { return t.str @@ -154,8 +154,7 @@ func (t Token) Int(bitSize int) (int64, bool) { return n, true } -// Uint returns the signed integer number if token is Number, else it will -// return an error. +// Uint returns the signed integer number if token is Number. // // The given bitSize specifies the unsigned integer type that the result must // fit into. 
It returns false if the number is not an unsigned integer value diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index b1eeea5079..c1866f3c1a 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -11,10 +11,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" ) -// The MessageSet wire format is equivalent to a message defiend as follows, +// The MessageSet wire format is equivalent to a message defined as follows, // where each Item defines an extension field with a field number of 'type_id' // and content of 'message'. MessageSet extensions must be non-repeated message // fields. @@ -48,33 +47,17 @@ func IsMessageSet(md pref.MessageDescriptor) bool { return ok && xmd.IsMessageSet() } -// IsMessageSetExtension reports this field extends a MessageSet. +// IsMessageSetExtension reports this field properly extends a MessageSet. func IsMessageSetExtension(fd pref.FieldDescriptor) bool { - if fd.Name() != ExtensionName { + switch { + case fd.Name() != ExtensionName: return false - } - if fd.FullName().Parent() != fd.Message().FullName() { + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): return false } - return IsMessageSet(fd.ContainingMessage()) -} - -// FindMessageSetExtension locates a MessageSet extension field by name. -// In text and JSON formats, the extension name used is the message itself. -// The extension field name is derived by appending ExtensionName. 
-func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { - name := s.Append(ExtensionName) - xt, err := r.FindExtensionByName(name) - if err != nil { - if err == preg.NotFound { - return nil, err - } - return nil, errors.Wrap(err, "%q", name) - } - if !IsMessageSetExtension(xt.TypeDescriptor()) { - return nil, preg.NotFound - } - return xt, nil + return true } // SizeField returns the size of a MessageSet item field containing an extension diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 16c02d7b62..38f1931c6f 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -104,7 +104,7 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { - f.L1.JSONName.Init(jsonName) + f.L1.StringName.InitJSON(jsonName) } case s == "packed": f.L1.HasPacked = true diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index c4ba1c598f..aa66bdd06a 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -32,7 +32,6 @@ type Encoder struct { encoderState indent string - newline string // set to "\n" if len(indent) > 0 delims [2]byte outputASCII bool } @@ -61,7 +60,6 @@ func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, erro return nil, errors.New("indent may only be composed of space and tab characters") } e.indent = indent - e.newline = "\n" } switch delims { case [2]byte{0, 0}: @@ -126,7 +124,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // are used to represent both the proto string and bytes type. r = rune(in[0]) fallthrough - case r < ' ' || r == '"' || r == '\\': + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: out = append(out, '\\') switch r { case '"', '\\': @@ -143,7 +141,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { out = strconv.AppendUint(out, uint64(r), 16) } in = in[n:] - case outputASCII && r >= utf8.RuneSelf: + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): out = append(out, '\\') if r <= math.MaxUint16 { out = append(out, 'u') @@ -168,7 +166,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // escaping. If no characters need escaping, this returns the input length. func indexNeedEscapeInString(s string) int { for i := 0; i < len(s); i++ { - if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { return i } } diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go deleted file mode 100644 index 517c4e2a04..0000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldsort defines an ordering of fields. 
-// -// The ordering defined by this package matches the historic behavior of the proto -// package, placing extensions first and oneofs last. -// -// There is no guarantee about stability of the wire encoding, and users should not -// depend on the order defined in this package as it is subject to change without -// notice. -package fieldsort - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Less returns true if field a comes before field j in ordered wire marshal output. -func Less(a, b protoreflect.FieldDescriptor) bool { - ea := a.IsExtension() - eb := b.IsExtension() - oa := a.ContainingOneof() - ob := b.ContainingOneof() - switch { - case ea != eb: - return ea - case oa != nil && ob != nil: - if oa == ob { - return a.Number() < b.Number() - } - return oa.Index() < ob.Index() - case oa != nil && !oa.IsSynthetic(): - return false - case ob != nil && !ob.IsSynthetic(): - return true - default: - return a.Number() < b.Number() - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index d02d770c98..b293b69473 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. // Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. package filedesc import ( diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 9385126fba..98ab142aee 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -99,15 +100,6 @@ func (fd *File) lazyInitOnce() { fd.mu.Unlock() } -// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code -// to be able to retrieve the raw descriptor. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. -func (fd *File) ProtoLegacyRawDesc() []byte { - return fd.builder.RawDescriptor -} - // GoPackagePath is a pseudo-internal API for determining the Go package path // that this file descriptor is declared in. 
// @@ -207,7 +199,7 @@ type ( Number pref.FieldNumber Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers Kind pref.Kind - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions HasPacked bool // promoted from google.protobuf.FieldOptions @@ -277,8 +269,9 @@ func (fd *Field) Options() pref.ProtoMessage { func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } -func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } @@ -373,7 +366,7 @@ type ( } ExtensionL2 struct { Options func() pref.ProtoMessage - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue @@ -391,8 +384,9 @@ func (xd *Extension) Options() pref.ProtoMessage { func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } -func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional @@ -506,27 +500,50 @@ func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syn func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} -type jsonName struct { - has bool - once sync.Once - name string +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string } -// Init initializes the name. It is exported for use by other internal packages. -func (js *jsonName) Init(s string) { - js.has = true - js.name = s +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name } -func (js *jsonName) get(fd pref.FieldDescriptor) string { - if !js.has { - js.once.Do(func() { - js.name = strs.JSONCamelCase(string(fd.Name())) - }) - } - return js.name +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. 
+ var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s } +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e672233e77..198451e3ec 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -451,7 +451,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_JsonName_field_number: - fd.L1.JSONName.Init(sb.MakeString(v)) + fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: @@ -551,7 +551,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] switch num { case genid.FieldDescriptorProto_JsonName_field_number: - xd.L2.JSONName.Init(sb.MakeString(v)) + xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index c876cd34d7..aa294fff99 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,9 +6,12 @@ package filedesc import ( "fmt" + "math" "sort" "sync" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/errors" @@ -245,6 +248,7 @@ type OneofFields struct { once sync.Once byName map[pref.Name]pref.FieldDescriptor // protected by once byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once } @@ -252,6 +256,7 @@ func (p *OneofFields) Len() int { return func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } func 
(p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} @@ -261,11 +266,13 @@ func (p *OneofFields) lazyInit() *OneofFields { if len(p.List) > 0 { p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f p.byNum[f.Number()] = f } } @@ -274,9 +281,170 @@ func (p *OneofFields) lazyInit() *OneofFields { } type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } } +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } + // Update the next index for all locations. 
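Editor's note: the ByDescriptor walk above rebuilds the FileDescriptorProto source path for a descriptor (e.g. message_type = 4, then field = 2) and resolves it against the location list via the byPath index and pathKey defined next. A hedged usage sketch follows; most generated descriptors carry no source info, so the lookup may simply return the zero SourceLocation.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("seconds")

	// ByDescriptor reconstructs fd's source path and looks it up in the
	// file's source-location table.
	loc := md.ParentFile().SourceLocations().ByDescriptor(fd)
	fmt.Println(loc.StartLine, loc.LeadingComments)
}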
+ p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. +type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 6a8825e802..30db19fdc7 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -142,6 +142,7 @@ type Fields struct { once sync.Once byName map[protoreflect.Name]*Field // protected by once byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once byNum map[protoreflect.FieldNumber]*Field // protected by once } @@ -163,6 +164,12 @@ func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { } return nil } +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { if d := p.lazyInit().byNum[n]; d != nil { return d @@ -178,6 +185,7 @@ func (p *Fields) lazyInit() *Fields { if len(p.List) > 0 { p.byName = make(map[protoreflect.Name]*Field, len(p.List)) p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) for i := range p.List { d := &p.List[i] @@ -187,6 +195,9 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byJSON[d.JSONName()]; !ok { p.byJSON[d.JSONName()] = d } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index b5974528db..abee5f30e9 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -167,7 +167,7 @@ func (Export) MessageTypeOf(m message) pref.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), "") + return legacyLoadMessageType(reflect.TypeOf(m), "") } // MessageStringOf returns the message value as a string, diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index c00744d385..cb4b482d16 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -10,6 +10,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" + 
"google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -20,6 +21,7 @@ type errInvalidUTF8 struct{} func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } // initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. // @@ -242,7 +244,7 @@ func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if p.Elem().IsNil() { p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) @@ -276,7 +278,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: v, @@ -420,7 +422,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: b, @@ -494,7 +496,7 @@ func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderF } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } m := reflect.New(f.mi.GoReflectType.Elem()).Interface() mp := pointerOfIface(m) @@ -550,7 +552,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -613,7 +615,7 @@ func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wt } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -681,7 +683,7 @@ func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wt } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -767,7 +769,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index ff198d0a15..1a509b63eb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -15,13 +15,13 @@ import ( ) // sizeBool returns the size of wire encoding a bool pointer as a Bool. 
-func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBool wire encodes a bool pointer as a Bool. -func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -29,7 +29,7 @@ func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byt } // consumeBool wire decodes a bool pointer as a Bool. -func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -45,7 +45,7 @@ func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bool() = protowire.DecodeBool(v) out.n = n @@ -61,7 +61,7 @@ var coderBool = pointerCoderFuncs{ // sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. // The zero value is not encoded. -func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() if v == false { return 0 @@ -71,7 +71,7 @@ func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { // appendBoolNoZero wire encodes a bool pointer as a Bool. // The zero value is not encoded. -func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() if v == false { return b, nil @@ -90,14 +90,14 @@ var coderBoolNoZero = pointerCoderFuncs{ // sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. // It panics if the pointer is nil. -func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.BoolPtr() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBoolPtr wire encodes a *bool pointer as a Bool. // It panics if the pointer is nil. -func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.BoolPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -105,7 +105,7 @@ func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeBoolPtr wire decodes a *bool pointer as a Bool. 
-func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -121,7 +121,7 @@ func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.BoolPtr() if *vp == nil { @@ -140,7 +140,7 @@ var coderBoolPtr = pointerCoderFuncs{ } // sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. -func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) @@ -149,7 +149,7 @@ func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBoolSlice encodes a []bool pointer as a repeated Bool. -func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -159,13 +159,13 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. -func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -180,7 +180,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeBool(v)) b = b[n:] @@ -204,7 +204,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeBool(v)) out.n = n @@ -219,7 +219,7 @@ var coderBoolSlice = pointerCoderFuncs{ } // sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. -func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() if len(s) == 0 { return 0 @@ -232,7 +232,7 @@ func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size i } // appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. 
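Editor's note: every consume* function above follows the same protowire convention: a negative length from ConsumeVarint/ConsumeBytes signals malformed input, and where the old code converted that with protowire.ParseError(n) the new code returns the shared errDecode. A minimal sketch of the underlying convention:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// 0x80 sets the continuation bit but the buffer ends there, so the
	// varint is truncated; ConsumeVarint reports this as a negative length.
	_, n := protowire.ConsumeVarint([]byte{0x80})
	if n < 0 {
		fmt.Println(protowire.ParseError(n)) // e.g. "unexpected EOF"
	}
}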
-func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() if len(s) == 0 { return b, nil @@ -257,19 +257,19 @@ var coderBoolPackedSlice = pointerCoderFuncs{ } // sizeBoolValue returns the size of wire encoding a bool value as a Bool. -func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) } // appendBoolValue encodes a bool value as a Bool. -func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) return b, nil } // consumeBoolValue decodes a bool value as a Bool. -func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -285,7 +285,7 @@ func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil @@ -299,7 +299,7 @@ var coderBoolValue = valueCoderFuncs{ } // sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. -func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -309,7 +309,7 @@ func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendBoolSliceValue encodes a []bool value as a repeated Bool. -func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -320,12 +320,12 @@ func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. 
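Editor's note: the *PackedSlice coders above write one key and one length-delimited payload of back-to-back varints instead of repeating the key per element. A hedged protowire sketch of that layout:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Packed encoding of []bool{true, false, true} for field 1:
	// tag (field 1, BytesType), payload length, then one varint per element.
	var payload []byte
	for _, v := range []bool{true, false, true} {
		payload = protowire.AppendVarint(payload, protowire.EncodeBool(v))
	}
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.BytesType)
	b = protowire.AppendBytes(b, payload)
	fmt.Printf("% x\n", b) // 0a 03 01 00 01
}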
-func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -340,7 +340,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) b = b[n:] @@ -363,7 +363,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) out.n = n @@ -378,7 +378,7 @@ var coderBoolSliceValue = valueCoderFuncs{ } // sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. -func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -393,7 +393,7 @@ func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. -func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -421,19 +421,19 @@ var coderBoolPackedSliceValue = valueCoderFuncs{ } // sizeEnumValue returns the size of wire encoding a value as a Enum. -func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Enum())) } // appendEnumValue encodes a value as a Enum. -func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Enum())) return b, nil } // consumeEnumValue decodes a value as a Enum. 
-func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -449,7 +449,7 @@ func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil @@ -463,7 +463,7 @@ var coderEnumValue = valueCoderFuncs{ } // sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. -func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -473,7 +473,7 @@ func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendEnumSliceValue encodes a [] value as a repeated Enum. -func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -484,12 +484,12 @@ func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeEnumSliceValue wire decodes a [] value as a repeated Enum. -func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -504,7 +504,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) b = b[n:] @@ -527,7 +527,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) out.n = n @@ -542,7 +542,7 @@ var coderEnumSliceValue = valueCoderFuncs{ } // sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. 
-func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -557,7 +557,7 @@ func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. -func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -585,13 +585,13 @@ var coderEnumPackedSliceValue = valueCoderFuncs{ } // sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. -func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32 wire encodes a int32 pointer as a Int32. -func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -599,7 +599,7 @@ func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt32 wire decodes a int32 pointer as a Int32. -func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -615,7 +615,7 @@ func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -631,7 +631,7 @@ var coderInt32 = pointerCoderFuncs{ // sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. // The zero value is not encoded. -func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -641,7 +641,7 @@ func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt32NoZero wire encodes a int32 pointer as a Int32. // The zero value is not encoded. -func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -660,14 +660,14 @@ var coderInt32NoZero = pointerCoderFuncs{ // sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. // It panics if the pointer is nil. -func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32Ptr wire encodes a *int32 pointer as a Int32. // It panics if the pointer is nil. 
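Editor's note: the *NoZero coder variants above implement proto3 implicit presence: a zero-valued scalar is simply not written. A hedged sketch of the observable effect, using Duration as a stand-in:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	b, err := proto.Marshal(&durationpb.Duration{Seconds: 0, Nanos: 0})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 0: zero-valued proto3 scalars are skipped entirely
}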
-func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -675,7 +675,7 @@ func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt32Ptr wire decodes a *int32 pointer as a Int32. -func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -691,7 +691,7 @@ func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -710,7 +710,7 @@ var coderInt32Ptr = pointerCoderFuncs{ } // sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. -func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -719,7 +719,7 @@ func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt32Slice encodes a []int32 pointer as a repeated Int32. -func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -729,13 +729,13 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. -func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -750,7 +750,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -774,7 +774,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -789,7 +789,7 @@ var coderInt32Slice = pointerCoderFuncs{ } // sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. 
-func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -802,7 +802,7 @@ func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. -func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -827,19 +827,19 @@ var coderInt32PackedSlice = pointerCoderFuncs{ } // sizeInt32Value returns the size of wire encoding a int32 value as a Int32. -func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) } // appendInt32Value encodes a int32 value as a Int32. -func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(int32(v.Int()))) return b, nil } // consumeInt32Value decodes a int32 value as a Int32. -func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -855,7 +855,7 @@ func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -869,7 +869,7 @@ var coderInt32Value = valueCoderFuncs{ } // sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. -func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -879,7 +879,7 @@ func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt32SliceValue encodes a []int32 value as a repeated Int32. -func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -890,12 +890,12 @@ func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. 
-func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -910,7 +910,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -933,7 +933,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -948,7 +948,7 @@ var coderInt32SliceValue = valueCoderFuncs{ } // sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. -func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -963,7 +963,7 @@ func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. -func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -991,13 +991,13 @@ var coderInt32PackedSliceValue = valueCoderFuncs{ } // sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. -func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32 wire encodes a int32 pointer as a Sint32. -func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1005,7 +1005,7 @@ func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint32 wire decodes a int32 pointer as a Sint32. 
-func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1021,7 +1021,7 @@ func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) out.n = n @@ -1037,7 +1037,7 @@ var coderSint32 = pointerCoderFuncs{ // sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. // The zero value is not encoded. -func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -1047,7 +1047,7 @@ func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint32NoZero wire encodes a int32 pointer as a Sint32. // The zero value is not encoded. -func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -1066,14 +1066,14 @@ var coderSint32NoZero = pointerCoderFuncs{ // sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32Ptr wire encodes a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1081,7 +1081,7 @@ func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. -func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1097,7 +1097,7 @@ func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -1116,7 +1116,7 @@ var coderSint32Ptr = pointerCoderFuncs{ } // sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. 
-func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) @@ -1125,7 +1125,7 @@ func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint32Slice encodes a []int32 pointer as a repeated Sint32. -func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1135,13 +1135,13 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. -func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1156,7 +1156,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) b = b[n:] @@ -1180,7 +1180,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) out.n = n @@ -1195,7 +1195,7 @@ var coderSint32Slice = pointerCoderFuncs{ } // sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. -func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -1208,7 +1208,7 @@ func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. -func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -1233,19 +1233,19 @@ var coderSint32PackedSlice = pointerCoderFuncs{ } // sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. -func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) } // appendSint32Value encodes a int32 value as a Sint32. 
-func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) return b, nil } // consumeSint32Value decodes a int32 value as a Sint32. -func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1261,7 +1261,7 @@ func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil @@ -1275,7 +1275,7 @@ var coderSint32Value = valueCoderFuncs{ } // sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. -func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1285,7 +1285,7 @@ func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint32SliceValue encodes a []int32 value as a repeated Sint32. -func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1296,12 +1296,12 @@ func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. 
-func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1316,7 +1316,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) b = b[n:] @@ -1339,7 +1339,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) out.n = n @@ -1354,7 +1354,7 @@ var coderSint32SliceValue = valueCoderFuncs{ } // sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. -func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1369,7 +1369,7 @@ func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. -func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1397,13 +1397,13 @@ var coderSint32PackedSliceValue = valueCoderFuncs{ } // sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. -func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32 wire encodes a uint32 pointer as a Uint32. -func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1411,7 +1411,7 @@ func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint32 wire decodes a uint32 pointer as a Uint32. 
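Editor's note: the Sint32 coders above differ from the Int32 ones only by the zigzag transform, which keeps small negative values small on the wire; the consume path masks with math.MaxUint32 before narrowing back to int32. A brief sketch of the transform and that mask:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		u := protowire.EncodeZigZag(v)
		// Same mask the consumeSint32* functions apply before narrowing.
		fmt.Println(v, "->", u, "->", int32(protowire.DecodeZigZag(u&math.MaxUint32)))
	}
}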
-func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1427,7 +1427,7 @@ func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = uint32(v) out.n = n @@ -1443,7 +1443,7 @@ var coderUint32 = pointerCoderFuncs{ // sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. // The zero value is not encoded. -func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -1453,7 +1453,7 @@ func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint32NoZero wire encodes a uint32 pointer as a Uint32. // The zero value is not encoded. -func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -1472,14 +1472,14 @@ var coderUint32NoZero = pointerCoderFuncs{ // sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1487,7 +1487,7 @@ func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. -func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1503,7 +1503,7 @@ func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -1522,7 +1522,7 @@ var coderUint32Ptr = pointerCoderFuncs{ } // sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. 
-func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1531,7 +1531,7 @@ func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. -func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1541,13 +1541,13 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. -func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1562,7 +1562,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, uint32(v)) b = b[n:] @@ -1586,7 +1586,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, uint32(v)) out.n = n @@ -1601,7 +1601,7 @@ var coderUint32Slice = pointerCoderFuncs{ } // sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. -func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -1614,7 +1614,7 @@ func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. -func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -1639,19 +1639,19 @@ var coderUint32PackedSlice = pointerCoderFuncs{ } // sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. -func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) } // appendUint32Value encodes a uint32 value as a Uint32. 
-func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) return b, nil } // consumeUint32Value decodes a uint32 value as a Uint32. -func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1667,7 +1667,7 @@ func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -1681,7 +1681,7 @@ var coderUint32Value = valueCoderFuncs{ } // sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. -func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1691,7 +1691,7 @@ func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. -func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1702,12 +1702,12 @@ func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1722,7 +1722,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -1745,7 +1745,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -1760,7 +1760,7 @@ var coderUint32SliceValue = valueCoderFuncs{ } // sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. -func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1775,7 +1775,7 @@ func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. -func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1803,13 +1803,13 @@ var coderUint32PackedSliceValue = valueCoderFuncs{ } // sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. -func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64 wire encodes a int64 pointer as a Int64. -func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1817,7 +1817,7 @@ func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt64 wire decodes a int64 pointer as a Int64. 
-func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1833,7 +1833,7 @@ func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -1849,7 +1849,7 @@ var coderInt64 = pointerCoderFuncs{ // sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. // The zero value is not encoded. -func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -1859,7 +1859,7 @@ func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt64NoZero wire encodes a int64 pointer as a Int64. // The zero value is not encoded. -func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -1878,14 +1878,14 @@ var coderInt64NoZero = pointerCoderFuncs{ // sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. // It panics if the pointer is nil. -func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64Ptr wire encodes a *int64 pointer as a Int64. // It panics if the pointer is nil. -func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1893,7 +1893,7 @@ func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt64Ptr wire decodes a *int64 pointer as a Int64. -func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1909,7 +1909,7 @@ func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -1928,7 +1928,7 @@ var coderInt64Ptr = pointerCoderFuncs{ } // sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. -func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1937,7 +1937,7 @@ func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
-func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1947,13 +1947,13 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. -func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1968,7 +1968,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -1992,7 +1992,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -2007,7 +2007,7 @@ var coderInt64Slice = pointerCoderFuncs{ } // sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. -func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2020,7 +2020,7 @@ func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. -func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2045,19 +2045,19 @@ var coderInt64PackedSlice = pointerCoderFuncs{ } // sizeInt64Value returns the size of wire encoding a int64 value as a Int64. -func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Int())) } // appendInt64Value encodes a int64 value as a Int64. -func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Int())) return b, nil } // consumeInt64Value decodes a int64 value as a Int64. 
-func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2073,7 +2073,7 @@ func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -2087,7 +2087,7 @@ var coderInt64Value = valueCoderFuncs{ } // sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. -func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2097,7 +2097,7 @@ func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt64SliceValue encodes a []int64 value as a repeated Int64. -func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2108,12 +2108,12 @@ func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. -func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2128,7 +2128,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -2151,7 +2151,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -2166,7 +2166,7 @@ var coderInt64SliceValue = valueCoderFuncs{ } // sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
-func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2181,7 +2181,7 @@ func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. -func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2209,13 +2209,13 @@ var coderInt64PackedSliceValue = valueCoderFuncs{ } // sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. -func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64 wire encodes a int64 pointer as a Sint64. -func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2223,7 +2223,7 @@ func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint64 wire decodes a int64 pointer as a Sint64. -func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2239,7 +2239,7 @@ func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = protowire.DecodeZigZag(v) out.n = n @@ -2255,7 +2255,7 @@ var coderSint64 = pointerCoderFuncs{ // sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. // The zero value is not encoded. -func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -2265,7 +2265,7 @@ func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint64NoZero wire encodes a int64 pointer as a Sint64. // The zero value is not encoded. -func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -2284,14 +2284,14 @@ var coderSint64NoZero = pointerCoderFuncs{ // sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. // It panics if the pointer is nil. 
-func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64Ptr wire encodes a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2299,7 +2299,7 @@ func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. -func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2315,7 +2315,7 @@ func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -2334,7 +2334,7 @@ var coderSint64Ptr = pointerCoderFuncs{ } // sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. -func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) @@ -2343,7 +2343,7 @@ func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint64Slice encodes a []int64 pointer as a repeated Sint64. -func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2353,13 +2353,13 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. 
-func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2374,7 +2374,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeZigZag(v)) b = b[n:] @@ -2398,7 +2398,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeZigZag(v)) out.n = n @@ -2413,7 +2413,7 @@ var coderSint64Slice = pointerCoderFuncs{ } // sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. -func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2426,7 +2426,7 @@ func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. -func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2451,19 +2451,19 @@ var coderSint64PackedSlice = pointerCoderFuncs{ } // sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. -func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) } // appendSint64Value encodes a int64 value as a Sint64. -func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) return b, nil } // consumeSint64Value decodes a int64 value as a Sint64. 
-func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2479,7 +2479,7 @@ func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil @@ -2493,7 +2493,7 @@ var coderSint64Value = valueCoderFuncs{ } // sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. -func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2503,7 +2503,7 @@ func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint64SliceValue encodes a []int64 value as a repeated Sint64. -func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2514,12 +2514,12 @@ func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. -func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2534,7 +2534,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) b = b[n:] @@ -2557,7 +2557,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) out.n = n @@ -2572,7 +2572,7 @@ var coderSint64SliceValue = valueCoderFuncs{ } // sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. 
-func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2587,7 +2587,7 @@ func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. -func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2615,13 +2615,13 @@ var coderSint64PackedSliceValue = valueCoderFuncs{ } // sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. -func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() return f.tagsize + protowire.SizeVarint(v) } // appendUint64 wire encodes a uint64 pointer as a Uint64. -func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2629,7 +2629,7 @@ func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint64 wire decodes a uint64 pointer as a Uint64. -func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2645,7 +2645,7 @@ func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -2661,7 +2661,7 @@ var coderUint64 = pointerCoderFuncs{ // sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. // The zero value is not encoded. -func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -2671,7 +2671,7 @@ func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint64NoZero wire encodes a uint64 pointer as a Uint64. // The zero value is not encoded. -func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -2690,14 +2690,14 @@ var coderUint64NoZero = pointerCoderFuncs{ // sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint64Ptr() return f.tagsize + protowire.SizeVarint(v) } // appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. 
// It panics if the pointer is nil. -func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2705,7 +2705,7 @@ func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. -func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2721,7 +2721,7 @@ func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -2740,7 +2740,7 @@ var coderUint64Ptr = pointerCoderFuncs{ } // sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. -func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(v) @@ -2749,7 +2749,7 @@ func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. -func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2759,13 +2759,13 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. -func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2780,7 +2780,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -2804,7 +2804,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -2819,7 +2819,7 @@ var coderUint64Slice = pointerCoderFuncs{ } // sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. 
-func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -2832,7 +2832,7 @@ func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. -func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -2857,19 +2857,19 @@ var coderUint64PackedSlice = pointerCoderFuncs{ } // sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. -func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(v.Uint()) } // appendUint64Value encodes a uint64 value as a Uint64. -func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, v.Uint()) return b, nil } // consumeUint64Value decodes a uint64 value as a Uint64. -func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2885,7 +2885,7 @@ func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -2899,7 +2899,7 @@ var coderUint64Value = valueCoderFuncs{ } // sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. -func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2909,7 +2909,7 @@ func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. -func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2920,12 +2920,12 @@ func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. 
-func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2940,7 +2940,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -2963,7 +2963,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -2978,7 +2978,7 @@ var coderUint64SliceValue = valueCoderFuncs{ } // sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. -func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2993,7 +2993,7 @@ func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. -func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3021,13 +3021,13 @@ var coderUint64PackedSliceValue = valueCoderFuncs{ } // sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. -func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32 wire encodes a int32 pointer as a Sfixed32. -func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3035,13 +3035,13 @@ func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. 
-func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -3057,7 +3057,7 @@ var coderSfixed32 = pointerCoderFuncs{ // sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. // The zero value is not encoded. -func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -3067,7 +3067,7 @@ func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. // The zero value is not encoded. -func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -3086,13 +3086,13 @@ var coderSfixed32NoZero = pointerCoderFuncs{ // sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3100,13 +3100,13 @@ func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. -func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -3125,14 +3125,14 @@ var coderSfixed32Ptr = pointerCoderFuncs{ } // sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. -func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. 
-func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3142,18 +3142,18 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. -func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -3167,7 +3167,7 @@ func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -3182,7 +3182,7 @@ var coderSfixed32Slice = pointerCoderFuncs{ } // sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. -func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -3192,7 +3192,7 @@ func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. -func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -3214,25 +3214,25 @@ var coderSfixed32PackedSlice = pointerCoderFuncs{ } // sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. -func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendSfixed32Value encodes a int32 value as a Sfixed32. -func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Int())) return b, nil } // consumeSfixed32Value decodes a int32 value as a Sfixed32. 
-func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -3246,14 +3246,14 @@ var coderSfixed32Value = valueCoderFuncs{ } // sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. -func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. -func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3264,17 +3264,17 @@ func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. -func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -3287,7 +3287,7 @@ func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -3302,7 +3302,7 @@ var coderSfixed32SliceValue = valueCoderFuncs{ } // sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. -func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3313,7 +3313,7 @@ func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. 
-func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3337,13 +3337,13 @@ var coderSfixed32PackedSliceValue = valueCoderFuncs{ } // sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. -func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32 wire encodes a uint32 pointer as a Fixed32. -func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3351,13 +3351,13 @@ func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed32 wire decodes a uint32 pointer as a Fixed32. -func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = v out.n = n @@ -3373,7 +3373,7 @@ var coderFixed32 = pointerCoderFuncs{ // sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. // The zero value is not encoded. -func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -3383,7 +3383,7 @@ func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. // The zero value is not encoded. -func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -3402,13 +3402,13 @@ var coderFixed32NoZero = pointerCoderFuncs{ // sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3416,13 +3416,13 @@ func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. 
-func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -3441,14 +3441,14 @@ var coderFixed32Ptr = pointerCoderFuncs{ } // sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. -func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. -func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3458,18 +3458,18 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. -func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -3483,7 +3483,7 @@ func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -3498,7 +3498,7 @@ var coderFixed32Slice = pointerCoderFuncs{ } // sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. -func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -3508,7 +3508,7 @@ func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. -func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -3530,25 +3530,25 @@ var coderFixed32PackedSlice = pointerCoderFuncs{ } // sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. 
-func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFixed32Value encodes a uint32 value as a Fixed32. -func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Uint())) return b, nil } // consumeFixed32Value decodes a uint32 value as a Fixed32. -func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -3562,14 +3562,14 @@ var coderFixed32Value = valueCoderFuncs{ } // sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. -func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. -func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3580,17 +3580,17 @@ func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. 
-func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -3603,7 +3603,7 @@ func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -3618,7 +3618,7 @@ var coderFixed32SliceValue = valueCoderFuncs{ } // sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. -func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3629,7 +3629,7 @@ func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. -func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3653,13 +3653,13 @@ var coderFixed32PackedSliceValue = valueCoderFuncs{ } // sizeFloat returns the size of wire encoding a float32 pointer as a Float. -func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloat wire encodes a float32 pointer as a Float. -func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3667,13 +3667,13 @@ func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeFloat wire decodes a float32 pointer as a Float. 
-func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float32() = math.Float32frombits(v) out.n = n @@ -3689,7 +3689,7 @@ var coderFloat = pointerCoderFuncs{ // sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. // The zero value is not encoded. -func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -3699,7 +3699,7 @@ func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendFloatNoZero wire encodes a float32 pointer as a Float. // The zero value is not encoded. -func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -3718,13 +3718,13 @@ var coderFloatNoZero = pointerCoderFuncs{ // sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. // It panics if the pointer is nil. -func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloatPtr wire encodes a *float32 pointer as a Float. // It panics if the pointer is nil. -func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3732,13 +3732,13 @@ func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeFloatPtr wire decodes a *float32 pointer as a Float. -func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float32Ptr() if *vp == nil { @@ -3757,14 +3757,14 @@ var coderFloatPtr = pointerCoderFuncs{ } // sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. -func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFloatSlice encodes a []float32 pointer as a repeated Float. 
-func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3774,18 +3774,18 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. -func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float32frombits(v)) b = b[n:] @@ -3799,7 +3799,7 @@ func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float32frombits(v)) out.n = n @@ -3814,7 +3814,7 @@ var coderFloatSlice = pointerCoderFuncs{ } // sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. -func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() if len(s) == 0 { return 0 @@ -3824,7 +3824,7 @@ func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. -func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() if len(s) == 0 { return b, nil @@ -3846,25 +3846,25 @@ var coderFloatPackedSlice = pointerCoderFuncs{ } // sizeFloatValue returns the size of wire encoding a float32 value as a Float. -func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFloatValue encodes a float32 value as a Float. -func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) return b, nil } // consumeFloatValue decodes a float32 value as a Float. 
-func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil @@ -3878,14 +3878,14 @@ var coderFloatValue = valueCoderFuncs{ } // sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. -func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFloatSliceValue encodes a []float32 value as a repeated Float. -func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3896,17 +3896,17 @@ func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. -func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) b = b[n:] @@ -3919,7 +3919,7 @@ func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) out.n = n @@ -3934,7 +3934,7 @@ var coderFloatSliceValue = valueCoderFuncs{ } // sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. -func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3945,7 +3945,7 @@ func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. 
-func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3969,13 +3969,13 @@ var coderFloatPackedSliceValue = valueCoderFuncs{ } // sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. -func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64 wire encodes a int64 pointer as a Sfixed64. -func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -3983,13 +3983,13 @@ func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. -func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -4005,7 +4005,7 @@ var coderSfixed64 = pointerCoderFuncs{ // sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. // The zero value is not encoded. -func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -4015,7 +4015,7 @@ func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. // The zero value is not encoded. -func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -4034,13 +4034,13 @@ var coderSfixed64NoZero = pointerCoderFuncs{ // sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -4048,13 +4048,13 @@ func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. 
-func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -4073,14 +4073,14 @@ var coderSfixed64Ptr = pointerCoderFuncs{ } // sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. -func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. -func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4090,18 +4090,18 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. -func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -4115,7 +4115,7 @@ func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -4130,7 +4130,7 @@ var coderSfixed64Slice = pointerCoderFuncs{ } // sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. -func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -4140,7 +4140,7 @@ func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. -func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -4162,25 +4162,25 @@ var coderSfixed64PackedSlice = pointerCoderFuncs{ } // sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. 
-func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendSfixed64Value encodes a int64 value as a Sfixed64. -func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, uint64(v.Int())) return b, nil } // consumeSfixed64Value decodes a int64 value as a Sfixed64. -func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -4194,14 +4194,14 @@ var coderSfixed64Value = valueCoderFuncs{ } // sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. -func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. -func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4212,17 +4212,17 @@ func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. 
-func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -4235,7 +4235,7 @@ func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -4250,7 +4250,7 @@ var coderSfixed64SliceValue = valueCoderFuncs{ } // sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. -func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4261,7 +4261,7 @@ func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. -func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4285,13 +4285,13 @@ var coderSfixed64PackedSliceValue = valueCoderFuncs{ } // sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. -func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64 wire encodes a uint64 pointer as a Fixed64. -func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4299,13 +4299,13 @@ func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
-func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -4321,7 +4321,7 @@ var coderFixed64 = pointerCoderFuncs{ // sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. // The zero value is not encoded. -func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -4331,7 +4331,7 @@ func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. // The zero value is not encoded. -func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -4350,13 +4350,13 @@ var coderFixed64NoZero = pointerCoderFuncs{ // sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4364,13 +4364,13 @@ func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. -func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -4389,14 +4389,14 @@ var coderFixed64Ptr = pointerCoderFuncs{ } // sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. -func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. 
-func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4406,18 +4406,18 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. -func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -4431,7 +4431,7 @@ func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -4446,7 +4446,7 @@ var coderFixed64Slice = pointerCoderFuncs{ } // sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. -func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -4456,7 +4456,7 @@ func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. -func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -4478,25 +4478,25 @@ var coderFixed64PackedSlice = pointerCoderFuncs{ } // sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. -func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendFixed64Value encodes a uint64 value as a Fixed64. -func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, v.Uint()) return b, nil } // consumeFixed64Value decodes a uint64 value as a Fixed64. 
-func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -4510,14 +4510,14 @@ var coderFixed64Value = valueCoderFuncs{ } // sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. -func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. -func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4528,17 +4528,17 @@ func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. -func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -4551,7 +4551,7 @@ func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -4566,7 +4566,7 @@ var coderFixed64SliceValue = valueCoderFuncs{ } // sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. -func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4577,7 +4577,7 @@ func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. 
-func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4601,13 +4601,13 @@ var coderFixed64PackedSliceValue = valueCoderFuncs{ } // sizeDouble returns the size of wire encoding a float64 pointer as a Double. -func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDouble wire encodes a float64 pointer as a Double. -func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4615,13 +4615,13 @@ func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeDouble wire decodes a float64 pointer as a Double. -func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float64() = math.Float64frombits(v) out.n = n @@ -4637,7 +4637,7 @@ var coderDouble = pointerCoderFuncs{ // sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. // The zero value is not encoded. -func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -4647,7 +4647,7 @@ func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendDoubleNoZero wire encodes a float64 pointer as a Double. // The zero value is not encoded. -func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -4666,13 +4666,13 @@ var coderDoubleNoZero = pointerCoderFuncs{ // sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. // It panics if the pointer is nil. -func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDoublePtr wire encodes a *float64 pointer as a Double. // It panics if the pointer is nil. 
-func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4680,13 +4680,13 @@ func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeDoublePtr wire decodes a *float64 pointer as a Double. -func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float64Ptr() if *vp == nil { @@ -4705,14 +4705,14 @@ var coderDoublePtr = pointerCoderFuncs{ } // sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. -func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendDoubleSlice encodes a []float64 pointer as a repeated Double. -func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4722,18 +4722,18 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. -func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float64frombits(v)) b = b[n:] @@ -4747,7 +4747,7 @@ func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float64frombits(v)) out.n = n @@ -4762,7 +4762,7 @@ var coderDoubleSlice = pointerCoderFuncs{ } // sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. -func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() if len(s) == 0 { return 0 @@ -4772,7 +4772,7 @@ func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. 
-func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() if len(s) == 0 { return b, nil @@ -4794,25 +4794,25 @@ var coderDoublePackedSlice = pointerCoderFuncs{ } // sizeDoubleValue returns the size of wire encoding a float64 value as a Double. -func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendDoubleValue encodes a float64 value as a Double. -func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) return b, nil } // consumeDoubleValue decodes a float64 value as a Double. -func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil @@ -4826,14 +4826,14 @@ var coderDoubleValue = valueCoderFuncs{ } // sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. -func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendDoubleSliceValue encodes a []float64 value as a repeated Double. -func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4844,17 +4844,17 @@ func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. 
-func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) b = b[n:] @@ -4867,7 +4867,7 @@ func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Num } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) out.n = n @@ -4882,7 +4882,7 @@ var coderDoubleSliceValue = valueCoderFuncs{ } // sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. -func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4893,7 +4893,7 @@ func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. -func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4917,13 +4917,13 @@ var coderDoublePackedSliceValue = valueCoderFuncs{ } // sizeString returns the size of wire encoding a string pointer as a String. -func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() return f.tagsize + protowire.SizeBytes(len(v)) } // appendString wire encodes a string pointer as a String. -func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4931,15 +4931,15 @@ func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeString wire decodes a string pointer as a String. 
-func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4952,7 +4952,7 @@ var coderString = pointerCoderFuncs{ } // appendStringValidateUTF8 wire encodes a string pointer as a String. -func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4963,18 +4963,18 @@ func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalO } // consumeStringValidateUTF8 wire decodes a string pointer as a String. -func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4988,7 +4988,7 @@ var coderStringValidateUTF8 = pointerCoderFuncs{ // sizeStringNoZero returns the size of wire encoding a string pointer as a String. // The zero value is not encoded. -func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() if len(v) == 0 { return 0 @@ -4998,7 +4998,7 @@ func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendStringNoZero wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5017,7 +5017,7 @@ var coderStringNoZero = pointerCoderFuncs{ // appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5039,14 +5039,14 @@ var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ // sizeStringPtr returns the size of wire encoding a *string pointer as a String. // It panics if the pointer is nil. 
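// The string decoders in this file switch from protowire.ConsumeString to
// protowire.ConsumeBytes with an explicit string(v) conversion, and UTF-8
// validation moves from utf8.ValidString to utf8.Valid on the raw byte slice;
// in consumeStringSliceValidateUTF8 the slice pointer is also fetched only
// after validation succeeds. A minimal sketch of the resulting decode path,
// reusing the names from the surrounding generated code:
//
//	v, n := protowire.ConsumeBytes(b)
//	if n < 0 {
//		return out, errDecode
//	}
//	if !utf8.Valid(v) {
//		return out, errInvalidUTF8{}
//	}
//	*p.String() = string(v)
//	out.n = n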
-func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.StringPtr() return f.tagsize + protowire.SizeBytes(len(v)) } // appendStringPtr wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5054,19 +5054,19 @@ func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeStringPtr wire decodes a *string pointer as a String. -func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5080,7 +5080,7 @@ var coderStringPtr = pointerCoderFuncs{ // appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5091,22 +5091,22 @@ func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marsh } // consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. -func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5119,7 +5119,7 @@ var coderStringPtrValidateUTF8 = pointerCoderFuncs{ } // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. -func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.StringSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5128,7 +5128,7 @@ func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendStringSlice encodes a []string pointer as a repeated String. 
-func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5138,16 +5138,16 @@ func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeStringSlice wire decodes a []string pointer as a repeated String. -func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.StringSlice() if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *sp = append(*sp, v) + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5160,7 +5160,7 @@ var coderStringSlice = pointerCoderFuncs{ } // appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. -func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5173,19 +5173,19 @@ func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. -func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.StringSlice() +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *sp = append(*sp, v) + sp := p.StringSlice() + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5198,25 +5198,25 @@ var coderStringSliceValidateUTF8 = pointerCoderFuncs{ } // sizeStringValue returns the size of wire encoding a string value as a String. -func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.String())) } // appendStringValue encodes a string value as a String. -func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) return b, nil } // consumeStringValue decodes a string value as a String. 
-func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfString(string(v)), out, nil @@ -5230,7 +5230,7 @@ var coderStringValue = valueCoderFuncs{ } // appendStringValueValidateUTF8 encodes a string value as a String. -func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) if !utf8.ValidString(v.String()) { @@ -5240,15 +5240,15 @@ func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint6 } // consumeStringValueValidateUTF8 decodes a string value as a String. -func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return protoreflect.Value{}, out, errInvalidUTF8{} } out.n = n @@ -5263,7 +5263,7 @@ var coderStringValueValidateUTF8 = valueCoderFuncs{ } // sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. -func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5273,7 +5273,7 @@ func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendStringSliceValue encodes a []string value as a repeated String. -func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5284,14 +5284,14 @@ func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeStringSliceValue wire decodes a []string value as a repeated String. 
-func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfString(string(v))) out.n = n @@ -5306,13 +5306,13 @@ var coderStringSliceValue = valueCoderFuncs{ } // sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. -func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() return f.tagsize + protowire.SizeBytes(len(v)) } // appendBytes wire encodes a []byte pointer as a Bytes. -func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5320,13 +5320,13 @@ func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeBytes wire decodes a []byte pointer as a Bytes. -func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(emptyBuf[:], v...) out.n = n @@ -5341,7 +5341,7 @@ var coderBytes = pointerCoderFuncs{ } // appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. -func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5352,13 +5352,13 @@ func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOp } // consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5377,7 +5377,7 @@ var coderBytesValidateUTF8 = pointerCoderFuncs{ // sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. // The zero value is not encoded. 
-func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() if len(v) == 0 { return 0 @@ -5387,7 +5387,7 @@ func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendBytesNoZero wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5399,13 +5399,13 @@ func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) // consumeBytesNoZero wire decodes a []byte pointer as a Bytes. // The zero value is not decoded. -func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(([]byte)(nil), v...) out.n = n @@ -5421,7 +5421,7 @@ var coderBytesNoZero = pointerCoderFuncs{ // appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5435,13 +5435,13 @@ func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5459,7 +5459,7 @@ var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ } // sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. -func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BytesSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5468,7 +5468,7 @@ func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. 
-func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5478,14 +5478,14 @@ func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BytesSlice() if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n @@ -5500,7 +5500,7 @@ var coderBytesSlice = pointerCoderFuncs{ } // appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5513,18 +5513,18 @@ func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mars } // consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BytesSlice() +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} } + sp := p.BytesSlice() *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n return out, nil @@ -5538,25 +5538,25 @@ var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ } // sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. -func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.Bytes())) } // appendBytesValue encodes a []byte value as a Bytes. -func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendBytes(b, v.Bytes()) return b, nil } // consumeBytesValue decodes a []byte value as a Bytes. 
-func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil @@ -5570,7 +5570,7 @@ var coderBytesValue = valueCoderFuncs{ } // sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. -func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5580,7 +5580,7 @@ func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. -func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5591,14 +5591,14 @@ func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. 
-func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) out.n = n diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 44885a761f..c1245fef48 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -5,7 +5,6 @@ package impl import ( - "errors" "reflect" "sort" @@ -118,7 +117,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -127,10 +126,10 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -157,7 +156,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err @@ -175,7 +174,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -184,10 +183,10 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -208,7 +207,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi var v []byte v, n = protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var o unmarshalOutput o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) @@ -221,7 +220,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 0e176d565d..cd40527ff6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -11,7 +11,7 @@ import ( 
"google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/order" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -27,6 +27,7 @@ type coderMessageInfo struct { coderFields map[protowire.Number]*coderFieldInfo sizecacheOffset offset unknownOffset offset + unknownPtrKind bool extensionOffset offset needsInitCheck bool isMessageSet bool @@ -47,9 +48,20 @@ type coderFieldInfo struct { } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { - mi.sizecacheOffset = si.sizecacheOffset - mi.unknownOffset = si.unknownOffset - mi.extensionOffset = si.extensionOffset + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } mi.coderFields = make(map[protowire.Number]*coderFieldInfo) fields := mi.Desc.Fields() @@ -73,6 +85,27 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { var funcs pointerCoderFuncs var childMessage *MessageInfo switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } case isOneof: fieldOffset = offsetOf(fs, mi.Exporter) case fd.IsWeak(): @@ -136,7 +169,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { sort.Slice(mi.orderedCoderFields, func(i, j int) bool { fi := fields.ByNumber(mi.orderedCoderFields[i].num) fj := fields.ByNumber(mi.orderedCoderFields[j].num) - return fieldsort.Less(fi, fj) + return order.LegacyFieldOrder(fi, fj) }) } @@ -157,3 +190,28 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.methods.Merge = mi.merge } } + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. +func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. 
+func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index cfb68e12fb..b7a23faf1e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -29,8 +29,9 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } - unknown := *p.Apply(mi.unknownOffset).Bytes() - size += messageset.SizeUnknown(unknown) + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } return size } @@ -69,10 +70,12 @@ func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions } } - unknown := *p.Apply(mi.unknownOffset).Bytes() - b, err := messageset.AppendUnknown(b, unknown) - if err != nil { - return b, err + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } } return b, nil @@ -100,13 +103,13 @@ func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOpt *ep = make(map[int32]ExtensionField) } ext := *ep - unknown := p.Apply(mi.unknownOffset).Bytes() initialized := true err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) if err == errUnknown { - *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) - *unknown = append(*unknown, v...) + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) 
return nil } if !o.initialized { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 86f7dc3c9d..90705e3aea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -30,7 +30,7 @@ func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } p.v.Elem().SetInt(int64(v)) out.n = n @@ -130,12 +130,12 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) @@ -150,7 +150,7 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 36a90dff38..acd61bb50b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -423,6 +423,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } if m, ok := v.Interface().(pref.ProtoMessage); ok { return pref.ValueOfMessage(m.ProtoReflect()) } @@ -437,6 +444,16 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } if rv.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) } @@ -451,6 +468,9 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } return rv.Type() == c.goType } @@ -459,9 +479,18 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { } func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } return c.PBValueOf(reflect.New(c.goType.Elem())) } func (c *messageConverter) Zero() pref.Value { return c.PBValueOf(reflect.Zero(c.goType)) } + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. 
+func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 85ba1d3b33..949dc49a65 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -17,6 +17,8 @@ import ( piface "google.golang.org/protobuf/runtime/protoiface" ) +var errDecode = errors.New("cannot parse invalid wire-format data") + type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags resolver interface { @@ -100,13 +102,13 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. var n int tag, n = protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } b = b[n:] } var num protowire.Number if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, errors.New("invalid field number") + return out, errDecode } else { num = protowire.Number(n) } @@ -114,7 +116,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if wtyp == protowire.EndGroupType { if num != groupTag { - return out, errors.New("mismatching end group marker") + return out, errDecode } groupTag = 0 break @@ -170,10 +172,10 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. } n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { - u := p.Apply(mi.unknownOffset).Bytes() + u := mi.mutableUnknownBytes(p) *u = protowire.AppendTag(*u, num, wtyp) *u = append(*u, b[:n]...) } @@ -181,7 +183,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. b = b[n:] } if groupTag != 0 { - return out, errors.New("missing end group marker") + return out, errDecode } if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { initialized = false @@ -221,7 +223,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, nil } case ValidationInvalid: - return out, errors.New("invalid wire format") + return out, errDecode case ValidationUnknown: } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 8c8a794c63..845c67d6e7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -79,8 +79,9 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int size += f.funcs.size(fptr, f, opts) } if mi.unknownOffset.IsValid() { - u := *p.Apply(mi.unknownOffset).Bytes() - size += len(u) + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } } if mi.sizecacheOffset.IsValid() { if size > math.MaxInt32 { @@ -141,8 +142,9 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt } } if mi.unknownOffset.IsValid() && !mi.isMessageSet { - u := *p.Apply(mi.unknownOffset).Bytes() - b = append(b, u...) + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) 
+ } } return b, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index c3d741c2f0..e3fb0b5785 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -30,7 +30,7 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), name) + return legacyLoadMessageType(reflect.TypeOf(m), name) } // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 61757ce50a..49e723161c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -154,7 +154,8 @@ func (x placeholderExtension) Number() pref.FieldNumber { retu func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } func (x placeholderExtension) Kind() pref.Kind { return 0 } func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } func (x placeholderExtension) HasPresence() bool { return false } func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 06c68e1170..3759b010c0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -24,14 +24,24 @@ import ( // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. func legacyWrapMessage(v reflect.Value) pref.Message { - typ := v.Type() - if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} } - mt := legacyLoadMessageInfo(typ, "") + mt := legacyLoadMessageInfo(t, "") return mt.MessageOf(v.Interface()) } +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. 
+func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, @@ -49,8 +59,9 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { GoReflectType: t, } + var hasMarshal, hasUnmarshal bool v := reflect.Zero(t).Interface() - if _, ok := v.(legacyMarshaler); ok { + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { mi.methods.Marshal = legacyMarshal // We have no way to tell whether the type's Marshal method @@ -59,10 +70,10 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // calling Marshal methods when present. mi.methods.Flags |= piface.SupportMarshalDeterministic } - if _, ok := v.(legacyUnmarshaler); ok { + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal } - if _, ok := v.(legacyMerger); ok { + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { mi.methods.Merge = legacyMerge } @@ -75,7 +86,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor // LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which must be a *struct kind and not implement the v2 API already. +// which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { @@ -114,17 +125,19 @@ func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescr // If the Go type has no fields, then this might be a proto3 empty message // from before the size cache was added. If there are any fields, check to // see that at least one of them looks like something we generated. 
- if nfield := t.Elem().NumField(); nfield > 0 { - hasProtoField := false - for i := 0; i < nfield; i++ { - f := t.Elem().Field(i) - if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { - hasProtoField = true - break + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) } - } - if !hasProtoField { - return aberrantLoadMessageDesc(t, name) } } @@ -370,7 +383,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var legacyProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &piface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -401,18 +414,40 @@ func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) if !ok { return piface.MergeOutput{} } - merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { + return piface.MergeOutput{} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } return piface.MergeOutput{Flags: piface.MergeComplete} } @@ -422,6 +457,9 @@ type aberrantMessageType struct { } func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) Zero() pref.Message { @@ -443,6 +481,17 @@ type aberrantMessage struct { v reflect.Value } +// Reset implements the v1 proto.Message.Reset method. 
+func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + func (m aberrantMessage) ProtoReflect() pref.Message { return m } @@ -454,33 +503,40 @@ func (m aberrantMessage) Type() pref.MessageType { return aberrantMessageType{m.v.Type()} } func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } return aberrantMessage{reflect.Zero(m.v.Type())} } func (m aberrantMessage) Interface() pref.ProtoMessage { return m } func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return } func (m aberrantMessage) Has(pref.FieldDescriptor) bool { - panic("invalid field descriptor") + return false } func (m aberrantMessage) Clear(pref.FieldDescriptor) { - panic("invalid field descriptor") + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { - panic("invalid field descriptor") + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { - panic("invalid oneof descriptor") + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) GetUnknown() pref.RawFields { return nil @@ -489,13 +545,13 @@ func (m aberrantMessage) SetUnknown(pref.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { - // An invalid message is a read-only, empty message. Since we don't know anything - // about the alleged contents of this message, we can't say with confidence that - // it is invalid in this sense. Therefore, report it as valid. - return true + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false } func (m aberrantMessage) ProtoMethods() *piface.Methods { - return legacyProtoMethods + return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { return m.v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index cdc4267dfa..c65bbc0446 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -77,9 +77,9 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } if mi.unknownOffset.IsValid() { - du := dst.Apply(mi.unknownOffset).Bytes() - su := src.Apply(mi.unknownOffset).Bytes() - if len(*su) > 0 { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) *du = append(*du, *su...) 
} } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index c026a98180..a104e28e85 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -109,22 +110,29 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { type ( SizeCache = int32 WeakFields = map[int32]protoreflect.ProtoMessage - UnknownFields = []byte + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte ExtensionFields = map[int32]ExtensionField ) var ( sizecacheType = reflect.TypeOf(SizeCache(0)) weakFieldsType = reflect.TypeOf(WeakFields(nil)) - unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) ) type structInfo struct { sizecacheOffset offset + sizecacheType reflect.Type weakOffset offset + weakType reflect.Type unknownOffset offset + unknownType reflect.Type extensionOffset offset + extensionType reflect.Type fieldsByNumber map[pref.FieldNumber]reflect.StructField oneofsByName map[pref.Name]reflect.StructField @@ -151,18 +159,22 @@ fieldLoop: case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: - if f.Type == unknownFieldsType { + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type } default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { @@ -212,4 +224,53 @@ func (mi *MessageInfo) New() protoreflect.Message { func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) } -func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt 
mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 0f4b8db760..9488b72613 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" + "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -16,6 +17,11 @@ type reflectMessageInfo struct { fields map[pref.FieldNumber]*fieldInfo oneofs map[pref.Name]*oneofInfo + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. + fieldTypes map[pref.FieldNumber]interface{} + // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) // It provides faster access to the fieldInfo, but may be incomplete. @@ -36,6 +42,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { mi.makeKnownFieldsFunc(si) mi.makeUnknownFieldsFunc(t, si) mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) } // makeKnownFieldsFunc generates functions for operations that can be performed @@ -51,17 +58,23 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } var fi fieldInfo switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) case fd.IsMap(): fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) case fd.IsWeak(): fi = fieldInfoForWeakMessage(fd, si.weakOffset) - case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: fi = fieldInfoForScalar(fd, fs, mi.Exporter) @@ -92,27 +105,53 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { i++ } } + + // Introduce instability to iteration order, but keep it deterministic. 
+ if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } } func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { - mi.getUnknown = func(pointer) pref.RawFields { return nil } - mi.setUnknown = func(pointer, pref.RawFields) { return } - if si.unknownOffset.IsValid() { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. mi.getUnknown = func(p pointer) pref.RawFields { if p.IsNil() { return nil } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - return pref.RawFields(*rv.Interface().(*[]byte)) + return *p.Apply(mi.unknownOffset).Bytes() } mi.setUnknown = func(p pointer, b pref.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - *rv.Interface().(*[]byte) = []byte(b) + *p.Apply(mi.unknownOffset).Bytes() = b } - } else { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: mi.getUnknown = func(pointer) pref.RawFields { return nil } @@ -139,6 +178,58 @@ func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { } } } +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} type extensionMap map[int32]ExtensionField @@ -306,7 +397,6 @@ var ( // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { - // TODO: Switch the input to be an opaque Pointer. 
if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -320,6 +410,17 @@ func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { func (m *messageReflectWrapper) pointer() pointer { return m.p } func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } +// Reset implements the v1 proto.Message.Reset method. +func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} func (m *messageIfaceWrapper) ProtoReflect() pref.Message { return (*messageReflectWrapper)(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 23124a86e4..343cf87219 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -28,6 +28,39 @@ type fieldInfo struct { newField func() pref.Value } +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { @@ -97,7 +130,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv.Set(reflect.New(ot)) } rv = rv.Elem().Elem().Field(0) - if rv.IsNil() { + if rv.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) @@ -225,7 +258,10 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { - panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. 
+ nullable = false } if ft.Kind() == reflect.Ptr { ft = ft.Elem() @@ -388,6 +424,9 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo return false } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } return !rv.IsNil() }, clear: func(p pointer) { @@ -404,13 +443,13 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo set: func(p pointer, v pref.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, mutable: func(p pointer) pref.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) @@ -464,3 +503,41 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } return oi } + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 67b4ede670..9e3ed821ef 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -121,6 +121,7 @@ func (p pointer) String() *string { return p.v.Interface().(*string) } func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } func (p pointer) Extensions() *map[int32]ExtensionField { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 088aa85d48..9ecf23a85b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -109,6 +109,7 @@ func (p pointer) String() 
*string { return (*string)(p.p) func (p pointer) StringPtr() **string { return (**string)(p.p) } func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go deleted file mode 100644 index a3de1cf324..0000000000 --- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mapsort provides sorted access to maps. -package mapsort - -import ( - "sort" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Range iterates over every map entry in sorted key order, -// calling f for each key and value encountered. -func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - var keys []protoreflect.MapKey - mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { - keys = append(keys, key) - return true - }) - sort.Slice(keys, func(i, j int) bool { - switch keyKind { - case protoreflect.BoolKind: - return !keys[i].Bool() && keys[j].Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return keys[i].Int() < keys[j].Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, - protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return keys[i].Uint() < keys[j].Uint() - case protoreflect.StringKind: - return keys[i].String() < keys[j].String() - default: - panic("invalid kind: " + keyKind.String()) - } - }) - for _, key := range keys { - if !f(key, mapv.Get(key)) { - break - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 0000000000..2a24953f6a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. 
+ if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. + IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. +// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y pref.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. + GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 0000000000..c8090e0c54 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. +package order + +import ( + "sort" + "sync" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd pref.FieldDescriptor + v pref.Value +} + +var messageFieldPool = sync.Pool{ + New: func() interface{} { return new([]messageField) }, +} + +type ( + // FieldRnger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called everytime a message field is visited. + VisitField = func(pref.FieldDescriptor, pref.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. 
+ p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. + fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. + for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k pref.MapKey + v pref.Value +} + +var mapEntryPool = sync.Pool{ + New: func() interface{} { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called everytime a map entry is visited. + VisitEntry = func(pref.MapKey, pref.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. + es.Range(func(k pref.MapKey, v pref.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 72cf770b42..5879131da7 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 25 + Minor = 26 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 42fc5195e8..49f9b8c88c 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -45,12 +45,14 @@ type UnmarshalOptions struct { } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { _, err := o.unmarshal(b, m.ProtoReflect()) return err @@ -116,10 +118,10 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) // Parse the tag (field number and wire type). num, wtyp, tagLen := protowire.ConsumeTag(b) if tagLen < 0 { - return protowire.ParseError(tagLen) + return errDecode } if num > protowire.MaxValidNumber { - return errors.New("invalid field number") + return errDecode } // Find the field descriptor for this field number. 
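// Illustrative usage sketch for the Unmarshal documentation added above: the
// destination must be mutable, i.e. a non-nil pointer to a message, because
// parsing writes into it in place. durationpb is used here only as a convenient
// concrete message type; any generated message behaves the same way.
package main

import (
    "fmt"
    "log"
    "time"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    b, err := proto.Marshal(durationpb.New(90 * time.Second))
    if err != nil {
        log.Fatal(err)
    }

    dst := &durationpb.Duration{} // mutable: a non-nil pointer to a message
    if err := proto.Unmarshal(b, dst); err != nil {
        log.Fatal(err)
    }
    fmt.Println(dst.Seconds) // 90
}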
@@ -159,7 +161,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) } valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) if valLen < 0 { - return protowire.ParseError(valLen) + return errDecode } if !o.DiscardUnknown { m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) @@ -194,7 +196,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto } b, n = protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } var ( keyField = fd.MapKey() @@ -213,10 +215,10 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if num > protowire.MaxValidNumber { - return 0, errors.New("invalid field number") + return 0, errDecode } b = b[n:] err = errUnknown @@ -246,7 +248,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } } else if err != nil { return 0, err @@ -272,3 +274,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto // to the unknown field set of a message. It is never returned from an exported // function. var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go index d6dc904dcc..301eeb20f8 100644 --- a/vendor/google.golang.org/protobuf/proto/decode_gen.go +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -27,7 +27,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil case protoreflect.EnumKind: @@ -36,7 +36,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil case protoreflect.Int32Kind: @@ -45,7 +45,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Sint32Kind: @@ -54,7 +54,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil case protoreflect.Uint32Kind: @@ -63,7 +63,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.Int64Kind: @@ -72,7 +72,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + 
return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Sint64Kind: @@ -81,7 +81,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil case protoreflect.Uint64Kind: @@ -90,7 +90,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.Sfixed32Kind: @@ -99,7 +99,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Fixed32Kind: @@ -108,7 +108,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.FloatKind: @@ -117,7 +117,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil case protoreflect.Sfixed64Kind: @@ -126,7 +126,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Fixed64Kind: @@ -135,7 +135,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.DoubleKind: @@ -144,7 +144,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil case protoreflect.StringKind: @@ -153,7 +153,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) @@ -165,7 +165,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil case protoreflect.MessageKind: @@ -174,7 +174,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil case protoreflect.GroupKind: @@ -183,7 +183,7 @@ func (o UnmarshalOptions) 
unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil default: @@ -197,12 +197,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) @@ -214,7 +214,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) return n, nil @@ -222,12 +222,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) @@ -239,7 +239,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) return n, nil @@ -247,12 +247,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -264,7 +264,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -272,12 +272,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) @@ -289,7 +289,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) return n, nil @@ -297,12 +297,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + 
return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -314,7 +314,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -322,12 +322,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -339,7 +339,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -347,12 +347,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) @@ -364,7 +364,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) return n, nil @@ -372,12 +372,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -389,7 +389,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -397,12 +397,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -414,7 +414,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -422,12 +422,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + 
return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -439,7 +439,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -447,12 +447,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) @@ -464,7 +464,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) return n, nil @@ -472,12 +472,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -489,7 +489,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -497,12 +497,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -514,7 +514,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -522,12 +522,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) @@ -539,7 +539,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) return n, nil @@ -549,7 +549,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return 0, 
errors.InvalidUTF8(string(fd.FullName())) @@ -562,7 +562,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) return n, nil @@ -572,7 +572,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { @@ -586,7 +586,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 7b47a1180e..d18239c237 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,9 @@ package proto import ( - "sort" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" @@ -211,14 +208,15 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] if messageset.IsMessageSet(m.Descriptor()) { return o.marshalMessageSet(b, m) } - // There are many choices for what order we visit fields in. The default one here - // is chosen for reasonable efficiency and simplicity given the protoreflect API. - // It is not deterministic, since Message.Range does not return fields in any - // defined order. - // - // When using deterministic serialization, we sort the known fields. + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalField(b, fd, v) return err == nil }) @@ -229,27 +227,6 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] return b, nil } -// rangeFields visits fields in a defined order when deterministic serialization is enabled. 
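// A sketch, from the public API side, of the behaviour the ordering changes above
// preserve: with MarshalOptions.Deterministic set, known fields are visited in the
// legacy ordering and map entries in sorted key order, so repeated marshals of the
// same message yield byte-identical output. structpb.Struct is chosen here only
// because it contains a map field.
package main

import (
    "bytes"
    "fmt"
    "log"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    s, err := structpb.NewStruct(map[string]interface{}{"b": 2, "a": 1, "c": 3})
    if err != nil {
        log.Fatal(err)
    }

    opts := proto.MarshalOptions{Deterministic: true}
    x, _ := opts.Marshal(s)
    y, _ := opts.Marshal(s)
    fmt.Println(bytes.Equal(x, y)) // true: map entries are emitted in sorted key order
}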
-func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if !o.Deterministic { - m.Range(f) - return - } - var fds []protoreflect.FieldDescriptor - m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - fds = append(fds, fd) - return true - }) - sort.Slice(fds, func(a, b int) bool { - return fieldsort.Less(fds[a], fds[b]) - }) - for _, fd := range fds { - if !f(fd, m.Get(fd)) { - break - } - } -} - func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { switch { case fd.IsList(): @@ -292,8 +269,12 @@ func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, l func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { keyf := fd.MapKey() valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } var err error - o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) var pos int b, pos = appendSpeculativeLength(b) @@ -312,14 +293,6 @@ func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, ma return b, err } -func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - if !o.Deterministic { - mapv.Range(f) - return - } - mapsort.Range(mapv, kind, f) -} - // When encoding length-prefixed fields, we speculatively set aside some number of bytes // for the length, encode the data, and then encode the length (shifting the data if necessary // to make room). diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 10902bd851..4dba2b9699 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -111,18 +111,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { // equalValue compares two singular values. 
func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.Message() != nil: - return equalMessage(x.Message(), y.Message()) - case fd.Kind() == pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() } diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 1d692c3a8b..312d5d45c6 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -28,8 +29,12 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b if !flags.ProtoLegacy { return b, errors.New("no support for message_set_wire_format") } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalMessageSetField(b, fd, v) return err == nil }) diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index ca14b09c34..1f0d183b10 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -32,3 +32,12 @@ var Error error func init() { Error = errors.Error } + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 37f254d4c0..e4dfb12050 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -144,6 +144,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } // Handle source locations. + f.L2.Locations.File = f for _, loc := range fd.GetSourceCodeInfo().GetLocation() { var l protoreflect.SourceLocation // TODO: Validate that the path points to an actual declaration? 
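// A brief sketch of two public behaviours touched above: the new proto.MessageName
// helper, and proto.Equal's handling of NaN (two NaN values compare as equal, even
// though NaN != NaN under Go's == operator). wrapperspb.DoubleValue is used only as
// a convenient message with a double field.
package main

import (
    "fmt"
    "math"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    fmt.Println(proto.MessageName(wrapperspb.Double(1))) // google.protobuf.DoubleValue

    x := wrapperspb.Double(math.NaN())
    y := wrapperspb.Double(math.NaN())
    fmt.Println(x.GetValue() == y.GetValue()) // false: NaN is never == NaN
    fmt.Println(proto.Equal(x, y))            // true: Equal treats NaN as equal to NaN
}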
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 673a230e7a..37efda1afe 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -135,7 +135,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc f.L1.Kind = protoreflect.Kind(fd.GetType()) } if fd.JsonName != nil { - f.L1.JSONName.Init(fd.GetJsonName()) + f.L1.StringName.InitJSON(fd.GetJsonName()) } } return fs, nil @@ -175,7 +175,7 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript x.L1.Kind = protoreflect.Kind(xd.GetType()) } if xd.JsonName != nil { - x.L2.JSONName.Init(xd.GetJsonName()) + x.L2.StringName.InitJSON(xd.GetJsonName()) } } return xs, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 2d5fa9936b..9af1d56487 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -239,6 +239,9 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) } if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 00d35e02ee..a7c5ceffc9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -9,6 +9,7 @@ import ( "strings" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" @@ -20,9 +21,11 @@ import ( func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ Name: proto.String(file.Path()), - Package: proto.String(string(file.Package())), Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } for i, imports := 0, file.Imports(); i < imports.Len(); i++ { imp := imports.Get(i) p.Dependency = append(p.Dependency, imp.Path()) @@ -138,7 +141,14 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi p.TypeName = fullNameOf(field.Message()) } if field.HasJSONName() { - p.JsonName = proto.String(field.JSONName()) + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } } if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 32ea3d98cd..121ba3a07b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -4,6 +4,10 @@ package protoreflect +import ( + "strconv" +) + // SourceLocations is a list of source locations. type SourceLocations interface { // Len reports the number of source locations in the proto file. @@ -11,9 +15,20 @@ type SourceLocations interface { // Get returns the ith SourceLocation. It panics if out of bounds. Get(int) SourceLocation - doNotImplement + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation - // TODO: Add ByPath and ByDescriptor helper methods. + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement } // SourceLocation describes a source location and @@ -39,6 +54,10 @@ type SourceLocation struct { LeadingComments string // TrailingComments is the trailing attached comment for the declaration. TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int } // SourcePath identifies part of a file descriptor for a source location. @@ -48,5 +67,62 @@ type SourceLocation struct { // See google.protobuf.SourceCodeInfo.Location.path. type SourcePath []int32 -// TODO: Add SourcePath.String method to pretty-print the path. For example: -// ".message_type[6].nested_type[15].field[3]" +// Equal reports whether p1 equals p2. +func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. +// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) 
+ *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 0000000000..b03c1223c4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = 
p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = p.appendSingularField(b, "extendee", nil) + case 7: + b = p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + 
switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", 
(*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5be14a7258..8e53c44a91 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -232,11 +232,15 @@ type MessageDescriptor interface { type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } // MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. type MessageType interface { // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. New() Message // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. Zero() Message // Descriptor returns the message descriptor. 
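For context on the (*SourcePath).append… helpers added above: each one walks a protobuf source path (the sequence of field numbers recorded in SourceCodeInfo) and dispatches on the leading number to emit a readable field path. A minimal sketch of what they produce, assuming the SourcePath.String entry point that these helpers back (it sits outside the hunks shown here):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Field 4 of FileDescriptorProto is message_type (repeated), and field 2 of
	// DescriptorProto is field (repeated), so this path points at the second
	// field of the first message declared in a file.
	p := protoreflect.SourcePath{4, 0, 2, 1}

	// Expected to print something like ".message_type[0].field[1]", assembled by
	// the appendSingularField/appendRepeatedField dispatch shown above.
	fmt.Println(p.String())
}
```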
@@ -245,6 +249,26 @@ type MessageType interface { Descriptor() MessageDescriptor } +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + // MessageDescriptors is a list of message declarations. type MessageDescriptors interface { // Len reports the number of messages. @@ -279,8 +303,15 @@ type FieldDescriptor interface { // JSONName reports the name used for JSON serialization. // It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. JSONName() string + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + // HasPresence reports whether the field distinguishes between unpopulated // and default values. HasPresence() bool @@ -371,6 +402,9 @@ type FieldDescriptors interface { // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. // It returns nil if not found. ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor // ByNumber returns the FieldDescriptor for a field numbered n. // It returns nil if not found. ByNumber(n FieldNumber) FieldDescriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 5e5f967164..66dcbcd0d2 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -17,24 +17,49 @@ package protoregistry import ( "fmt" - "log" + "os" "strings" "sync" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" ) +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + // ignoreConflict reports whether to ignore a registration conflict // given the descriptor being registered and the error. // It is a variable so that the behavior is easily overridden in another file. 
var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { - log.Printf(""+ - "WARNING: %v\n"+ - "A future release will panic on registration conflicts. See:\n"+ - "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ - "\n", err) - return true + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } } var globalMutex sync.RWMutex @@ -96,38 +121,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { } path := file.Path() if prev := r.filesByPath[path]; prev != nil { - // TODO: Remove this after some soak-in period after moving these types. - var prevPath string - const prevModule = "google.golang.org/genproto" - const prevVersion = "cb27e3aa (May 26th, 2020)" - switch path { - case "google/protobuf/field_mask.proto": - prevPath = prevModule + "/protobuf/field_mask" - case "google/protobuf/api.proto": - prevPath = prevModule + "/protobuf/api" - case "google/protobuf/type.proto": - prevPath = prevModule + "/protobuf/ptype" - case "google/protobuf/source_context.proto": - prevPath = prevModule + "/protobuf/source_context" - } - if r == GlobalFiles && prevPath != "" { - pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") - pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" - currPath := "google.golang.org/protobuf/types/known/" + pkgName - panic(fmt.Sprintf(""+ - "duplicate registration of %q\n"+ - "\n"+ - "The generated definition for this file has moved:\n"+ - "\tfrom: %q\n"+ - "\tto: %q\n"+ - "A dependency on the %q module must\n"+ - "be at version %v or higher.\n"+ - "\n"+ - "Upgrade the dependency by running:\n"+ - "\tgo get -u %v\n", - path, prevPath, currPath, prevModule, prevVersion, prevPath)) - } - + r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) err = amendErrorWithCaller(err, prev, file) if r == GlobalFiles && ignoreConflict(file, err) { @@ -178,6 +172,47 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { return nil } +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
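The rewritten ignoreConflict above derives its behavior from two override points: the linker-settable conflictPolicy default and the GOLANG_PROTOBUF_REGISTRATION_CONFLICT environment variable, with the environment variable taking precedence. A minimal standalone sketch of that precedence (the variable and function names below are illustrative, not part of the library's API):

```go
package main

import (
	"fmt"
	"os"
)

// Compile-time default; in the real package this is the linker-overridable
// protoregistry.conflictPolicy variable shown in the hunk above.
var conflictPolicy = "panic" // "panic" | "warn" | "ignore"

// effectivePolicy mirrors the precedence used by ignoreConflict: a non-empty
// environment variable wins over the compiled-in default.
func effectivePolicy() string {
	if v := os.Getenv("GOLANG_PROTOBUF_REGISTRATION_CONFLICT"); v != "" {
		return v
	}
	return conflictPolicy
}

func main() {
	// e.g. GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main  -> "warn"
	fmt.Println("registration conflict policy:", effectivePolicy())
}
```

Note that the removed code always warned and continued, whereas the new default is to panic; selecting the "warn" policy restores the previous log-and-continue behavior.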
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + // FindDescriptorByName looks up a descriptor by the full name. // // This returns (nil, NotFound) if not found. @@ -560,13 +595,25 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp return nil, NotFound } -// FindMessageByName looks up a message by its full name. -// E.g., "google.protobuf.Any" +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". // -// This return (nil, NotFound) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - // The full name by itself is a valid URL. - return r.FindMessageByURL(string(message)) + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound } // FindMessageByURL looks up a message by a URL identifier. @@ -574,6 +621,8 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // // This returns (nil, NotFound) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. if r == nil { return nil, NotFound } @@ -613,6 +662,26 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E if xt, _ := v.(protoreflect.ExtensionType); xt != nil { return xt, nil } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. 
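The FindMessageByName rewrite above makes name lookup a direct map access instead of a detour through FindMessageByURL, while URL lookup keeps stripping everything up to and including the last '/'. A small usage sketch; it assumes a generated package such as anypb is linked in so that google.protobuf.Any is registered in GlobalTypes:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/anypb" // registers google.protobuf.Any at init
)

func main() {
	// Direct lookup by full name.
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName())

	// URL lookup accepts any type URL prefix; only the part after the last '/'
	// is used as the full name.
	mt, err = protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Any")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName())
}
```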
+ if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) } return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 8242378569..f77239fc3b 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3557,16 +3557,15 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, } var ( diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go deleted file mode 100644 index 7db6e55987..0000000000 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dynamicpb creates protocol buffer messages using runtime type information. 
-package dynamicpb - -import ( - "math" - - "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// enum is a dynamic protoreflect.Enum. -type enum struct { - num pref.EnumNumber - typ pref.EnumType -} - -func (e enum) Descriptor() pref.EnumDescriptor { return e.typ.Descriptor() } -func (e enum) Type() pref.EnumType { return e.typ } -func (e enum) Number() pref.EnumNumber { return e.num } - -// enumType is a dynamic protoreflect.EnumType. -type enumType struct { - desc pref.EnumDescriptor -} - -// NewEnumType creates a new EnumType with the provided descriptor. -// -// EnumTypes created by this package are equal if their descriptors are equal. -// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2). -// -// Enum values created by the EnumType are equal if their numbers are equal. -func NewEnumType(desc pref.EnumDescriptor) pref.EnumType { - return enumType{desc} -} - -func (et enumType) New(n pref.EnumNumber) pref.Enum { return enum{n, et} } -func (et enumType) Descriptor() pref.EnumDescriptor { return et.desc } - -// extensionType is a dynamic protoreflect.ExtensionType. -type extensionType struct { - desc extensionTypeDescriptor -} - -// A Message is a dynamically constructed protocol buffer message. -// -// Message implements the proto.Message interface, and may be used with all -// standard proto package functions such as Marshal, Unmarshal, and so forth. -// -// Message also implements the protoreflect.Message interface. See the protoreflect -// package documentation for that interface for how to get and set fields and -// otherwise interact with the contents of a Message. -// -// Reflection API functions which construct messages, such as NewField, -// return new dynamic messages of the appropriate type. Functions which take -// messages, such as Set for a message-value field, will accept any message -// with a compatible type. -// -// Operations which modify a Message are not safe for concurrent use. -type Message struct { - typ messageType - known map[pref.FieldNumber]pref.Value - ext map[pref.FieldNumber]pref.FieldDescriptor - unknown pref.RawFields -} - -var ( - _ pref.Message = (*Message)(nil) - _ pref.ProtoMessage = (*Message)(nil) - _ protoiface.MessageV1 = (*Message)(nil) -) - -// NewMessage creates a new message with the provided descriptor. -func NewMessage(desc pref.MessageDescriptor) *Message { - return &Message{ - typ: messageType{desc}, - known: make(map[pref.FieldNumber]pref.Value), - ext: make(map[pref.FieldNumber]pref.FieldDescriptor), - } -} - -// ProtoMessage implements the legacy message interface. -func (m *Message) ProtoMessage() {} - -// ProtoReflect implements the protoreflect.ProtoMessage interface. -func (m *Message) ProtoReflect() pref.Message { - return m -} - -// String returns a string representation of a message. -func (m *Message) String() string { - return protoimpl.X.MessageStringOf(m) -} - -// Reset clears the message to be empty, but preserves the dynamic message type. -func (m *Message) Reset() { - m.known = make(map[pref.FieldNumber]pref.Value) - m.ext = make(map[pref.FieldNumber]pref.FieldDescriptor) - m.unknown = nil -} - -// Descriptor returns the message descriptor. -func (m *Message) Descriptor() pref.MessageDescriptor { - return m.typ.desc -} - -// Type returns the message type. 
-func (m *Message) Type() pref.MessageType { - return m.typ -} - -// New returns a newly allocated empty message with the same descriptor. -// See protoreflect.Message for details. -func (m *Message) New() pref.Message { - return m.Type().New() -} - -// Interface returns the message. -// See protoreflect.Message for details. -func (m *Message) Interface() pref.ProtoMessage { - return m -} - -// ProtoMethods is an internal detail of the protoreflect.Message interface. -// Users should never call this directly. -func (m *Message) ProtoMethods() *protoiface.Methods { - return nil -} - -// Range visits every populated field in undefined order. -// See protoreflect.Message for details. -func (m *Message) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - for num, v := range m.known { - fd := m.ext[num] - if fd == nil { - fd = m.Descriptor().Fields().ByNumber(num) - } - if !isSet(fd, v) { - continue - } - if !f(fd, v) { - return - } - } -} - -// Has reports whether a field is populated. -// See protoreflect.Message for details. -func (m *Message) Has(fd pref.FieldDescriptor) bool { - m.checkField(fd) - if fd.IsExtension() && m.ext[fd.Number()] != fd { - return false - } - v, ok := m.known[fd.Number()] - if !ok { - return false - } - return isSet(fd, v) -} - -// Clear clears a field. -// See protoreflect.Message for details. -func (m *Message) Clear(fd pref.FieldDescriptor) { - m.checkField(fd) - num := fd.Number() - delete(m.known, num) - delete(m.ext, num) -} - -// Get returns the value of a field. -// See protoreflect.Message for details. -func (m *Message) Get(fd pref.FieldDescriptor) pref.Value { - m.checkField(fd) - num := fd.Number() - if fd.IsExtension() { - if fd != m.ext[num] { - return fd.(pref.ExtensionTypeDescriptor).Type().Zero() - } - return m.known[num] - } - if v, ok := m.known[num]; ok { - switch { - case fd.IsMap(): - if v.Map().Len() > 0 { - return v - } - case fd.IsList(): - if v.List().Len() > 0 { - return v - } - default: - return v - } - } - switch { - case fd.IsMap(): - return pref.ValueOfMap(&dynamicMap{desc: fd}) - case fd.IsList(): - return pref.ValueOfList(emptyList{desc: fd}) - case fd.Message() != nil: - return pref.ValueOfMessage(&Message{typ: messageType{fd.Message()}}) - case fd.Kind() == pref.BytesKind: - return pref.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...)) - default: - return fd.Default() - } -} - -// Mutable returns a mutable reference to a repeated, map, or message field. -// See protoreflect.Message for details. -func (m *Message) Mutable(fd pref.FieldDescriptor) pref.Value { - m.checkField(fd) - if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { - panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName())) - } - if m.known == nil { - panic(errors.New("%v: modification of read-only message", fd.FullName())) - } - num := fd.Number() - if fd.IsExtension() { - if fd != m.ext[num] { - m.ext[num] = fd - m.known[num] = fd.(pref.ExtensionTypeDescriptor).Type().New() - } - return m.known[num] - } - if v, ok := m.known[num]; ok { - return v - } - m.clearOtherOneofFields(fd) - m.known[num] = m.NewField(fd) - if fd.IsExtension() { - m.ext[num] = fd - } - return m.known[num] -} - -// Set stores a value in a field. -// See protoreflect.Message for details. 
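This vendored copy of dynamicpb is dropped by this change, but the deleted doc comments above describe a package that remains available upstream. For context, a short sketch of the API they document, building and populating a message purely from a descriptor (durationpb is used only as a convenient source of a MessageDescriptor):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/dynamicpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Any MessageDescriptor will do; here it comes from a generated type.
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()

	// Construct a dynamic message and set a field via the reflection API,
	// as documented by the Set method above.
	m := dynamicpb.NewMessage(md)
	m.Set(md.Fields().ByName("seconds"), protoreflect.ValueOfInt64(90))

	// The dynamic message implements proto.Message, so it marshals normally.
	b, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled %d bytes: % x\n", len(b), b)
}
```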
-func (m *Message) Set(fd pref.FieldDescriptor, v pref.Value) { - m.checkField(fd) - if m.known == nil { - panic(errors.New("%v: modification of read-only message", fd.FullName())) - } - if fd.IsExtension() { - isValid := true - switch { - case !fd.(pref.ExtensionTypeDescriptor).Type().IsValidValue(v): - isValid = false - case fd.IsList(): - isValid = v.List().IsValid() - case fd.IsMap(): - isValid = v.Map().IsValid() - case fd.Message() != nil: - isValid = v.Message().IsValid() - } - if !isValid { - panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())) - } - m.ext[fd.Number()] = fd - } else { - typecheck(fd, v) - } - m.clearOtherOneofFields(fd) - m.known[fd.Number()] = v -} - -func (m *Message) clearOtherOneofFields(fd pref.FieldDescriptor) { - od := fd.ContainingOneof() - if od == nil { - return - } - num := fd.Number() - for i := 0; i < od.Fields().Len(); i++ { - if n := od.Fields().Get(i).Number(); n != num { - delete(m.known, n) - } - } -} - -// NewField returns a new value for assignable to the field of a given descriptor. -// See protoreflect.Message for details. -func (m *Message) NewField(fd pref.FieldDescriptor) pref.Value { - m.checkField(fd) - switch { - case fd.IsExtension(): - return fd.(pref.ExtensionTypeDescriptor).Type().New() - case fd.IsMap(): - return pref.ValueOfMap(&dynamicMap{ - desc: fd, - mapv: make(map[interface{}]pref.Value), - }) - case fd.IsList(): - return pref.ValueOfList(&dynamicList{desc: fd}) - case fd.Message() != nil: - return pref.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) - default: - return fd.Default() - } -} - -// WhichOneof reports which field in a oneof is populated, returning nil if none are populated. -// See protoreflect.Message for details. -func (m *Message) WhichOneof(od pref.OneofDescriptor) pref.FieldDescriptor { - for i := 0; i < od.Fields().Len(); i++ { - fd := od.Fields().Get(i) - if m.Has(fd) { - return fd - } - } - return nil -} - -// GetUnknown returns the raw unknown fields. -// See protoreflect.Message for details. -func (m *Message) GetUnknown() pref.RawFields { - return m.unknown -} - -// SetUnknown sets the raw unknown fields. -// See protoreflect.Message for details. -func (m *Message) SetUnknown(r pref.RawFields) { - if m.known == nil { - panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) - } - m.unknown = r -} - -// IsValid reports whether the message is valid. -// See protoreflect.Message for details. -func (m *Message) IsValid() bool { - return m.known != nil -} - -func (m *Message) checkField(fd pref.FieldDescriptor) { - if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() { - if _, ok := fd.(pref.ExtensionTypeDescriptor); !ok { - panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName())) - } - return - } - if fd.Parent() == m.Descriptor() { - return - } - fields := m.Descriptor().Fields() - index := fd.Index() - if index >= fields.Len() || fields.Get(index) != fd { - panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName())) - } -} - -type messageType struct { - desc pref.MessageDescriptor -} - -// NewMessageType creates a new MessageType with the provided descriptor. -// -// MessageTypes created by this package are equal if their descriptors are equal. -// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2). 
-func NewMessageType(desc pref.MessageDescriptor) pref.MessageType { - return messageType{desc} -} - -func (mt messageType) New() pref.Message { return NewMessage(mt.desc) } -func (mt messageType) Zero() pref.Message { return &Message{typ: messageType{mt.desc}} } -func (mt messageType) Descriptor() pref.MessageDescriptor { return mt.desc } - -type emptyList struct { - desc pref.FieldDescriptor -} - -func (x emptyList) Len() int { return 0 } -func (x emptyList) Get(n int) pref.Value { panic(errors.New("out of range")) } -func (x emptyList) Set(n int, v pref.Value) { panic(errors.New("modification of immutable list")) } -func (x emptyList) Append(v pref.Value) { panic(errors.New("modification of immutable list")) } -func (x emptyList) AppendMutable() pref.Value { panic(errors.New("modification of immutable list")) } -func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) } -func (x emptyList) NewElement() pref.Value { return newListEntry(x.desc) } -func (x emptyList) IsValid() bool { return false } - -type dynamicList struct { - desc pref.FieldDescriptor - list []pref.Value -} - -func (x *dynamicList) Len() int { - return len(x.list) -} - -func (x *dynamicList) Get(n int) pref.Value { - return x.list[n] -} - -func (x *dynamicList) Set(n int, v pref.Value) { - typecheckSingular(x.desc, v) - x.list[n] = v -} - -func (x *dynamicList) Append(v pref.Value) { - typecheckSingular(x.desc, v) - x.list = append(x.list, v) -} - -func (x *dynamicList) AppendMutable() pref.Value { - if x.desc.Message() == nil { - panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName())) - } - v := x.NewElement() - x.Append(v) - return v -} - -func (x *dynamicList) Truncate(n int) { - // Zero truncated elements to avoid keeping data live. 
- for i := n; i < len(x.list); i++ { - x.list[i] = pref.Value{} - } - x.list = x.list[:n] -} - -func (x *dynamicList) NewElement() pref.Value { - return newListEntry(x.desc) -} - -func (x *dynamicList) IsValid() bool { - return true -} - -type dynamicMap struct { - desc pref.FieldDescriptor - mapv map[interface{}]pref.Value -} - -func (x *dynamicMap) Get(k pref.MapKey) pref.Value { return x.mapv[k.Interface()] } -func (x *dynamicMap) Set(k pref.MapKey, v pref.Value) { - typecheckSingular(x.desc.MapKey(), k.Value()) - typecheckSingular(x.desc.MapValue(), v) - x.mapv[k.Interface()] = v -} -func (x *dynamicMap) Has(k pref.MapKey) bool { return x.Get(k).IsValid() } -func (x *dynamicMap) Clear(k pref.MapKey) { delete(x.mapv, k.Interface()) } -func (x *dynamicMap) Mutable(k pref.MapKey) pref.Value { - if x.desc.MapValue().Message() == nil { - panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName())) - } - v := x.Get(k) - if !v.IsValid() { - v = x.NewValue() - x.Set(k, v) - } - return v -} -func (x *dynamicMap) Len() int { return len(x.mapv) } -func (x *dynamicMap) NewValue() pref.Value { - if md := x.desc.MapValue().Message(); md != nil { - return pref.ValueOfMessage(NewMessage(md).ProtoReflect()) - } - return x.desc.MapValue().Default() -} -func (x *dynamicMap) IsValid() bool { - return x.mapv != nil -} - -func (x *dynamicMap) Range(f func(pref.MapKey, pref.Value) bool) { - for k, v := range x.mapv { - if !f(pref.ValueOf(k).MapKey(), v) { - return - } - } -} - -func isSet(fd pref.FieldDescriptor, v pref.Value) bool { - switch { - case fd.IsMap(): - return v.Map().Len() > 0 - case fd.IsList(): - return v.List().Len() > 0 - case fd.ContainingOneof() != nil: - return true - case fd.Syntax() == pref.Proto3 && !fd.IsExtension(): - switch fd.Kind() { - case pref.BoolKind: - return v.Bool() - case pref.EnumKind: - return v.Enum() != 0 - case pref.Int32Kind, pref.Sint32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed32Kind, pref.Sfixed64Kind: - return v.Int() != 0 - case pref.Uint32Kind, pref.Uint64Kind, pref.Fixed32Kind, pref.Fixed64Kind: - return v.Uint() != 0 - case pref.FloatKind, pref.DoubleKind: - return v.Float() != 0 || math.Signbit(v.Float()) - case pref.StringKind: - return v.String() != "" - case pref.BytesKind: - return len(v.Bytes()) > 0 - } - } - return true -} - -func typecheck(fd pref.FieldDescriptor, v pref.Value) { - if err := typeIsValid(fd, v); err != nil { - panic(err) - } -} - -func typeIsValid(fd pref.FieldDescriptor, v pref.Value) error { - switch { - case !v.IsValid(): - return errors.New("%v: assigning invalid value", fd.FullName()) - case fd.IsMap(): - if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() { - return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) - } - return nil - case fd.IsList(): - switch list := v.Interface().(type) { - case *dynamicList: - if list.desc == fd && list.IsValid() { - return nil - } - case emptyList: - if list.desc == fd && list.IsValid() { - return nil - } - } - return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) - default: - return singularTypeIsValid(fd, v) - } -} - -func typecheckSingular(fd pref.FieldDescriptor, v pref.Value) { - if err := singularTypeIsValid(fd, v); err != nil { - panic(err) - } -} - -func singularTypeIsValid(fd pref.FieldDescriptor, v pref.Value) error { - vi := v.Interface() - var ok bool - switch fd.Kind() { - case pref.BoolKind: - _, ok = vi.(bool) - case pref.EnumKind: - // We could check 
against the valid set of enum values, but do not. - _, ok = vi.(pref.EnumNumber) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - _, ok = vi.(int32) - case pref.Uint32Kind, pref.Fixed32Kind: - _, ok = vi.(uint32) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - _, ok = vi.(int64) - case pref.Uint64Kind, pref.Fixed64Kind: - _, ok = vi.(uint64) - case pref.FloatKind: - _, ok = vi.(float32) - case pref.DoubleKind: - _, ok = vi.(float64) - case pref.StringKind: - _, ok = vi.(string) - case pref.BytesKind: - _, ok = vi.([]byte) - case pref.MessageKind, pref.GroupKind: - var m pref.Message - m, ok = vi.(pref.Message) - if ok && m.Descriptor().FullName() != fd.Message().FullName() { - return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName()) - } - if dm, ok := vi.(*Message); ok && dm.known == nil { - return errors.New("%v: assigning invalid zero-value message", fd.FullName()) - } - } - if !ok { - return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) - } - return nil -} - -func newListEntry(fd pref.FieldDescriptor) pref.Value { - switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.EnumKind: - return pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.MessageKind, pref.GroupKind: - return pref.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) - } - panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind())) -} - -// NewExtensionType creates a new ExtensionType with the provided descriptor. -// -// Dynamic ExtensionTypes with the same descriptor compare as equal. That is, -// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2). -// -// The InterfaceOf and ValueOf methods of the extension type are defined as: -// -// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { -// return protoreflect.ValueOf(iv) -// } -// -// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { -// return v.Interface() -// } -// -// The Go type used by the proto.GetExtension and proto.SetExtension functions -// is determined by these methods, and is therefore equivalent to the Go type -// used to represent a protoreflect.Value. See the protoreflect.Value -// documentation for more details. 
-func NewExtensionType(desc pref.ExtensionDescriptor) pref.ExtensionType { - if xt, ok := desc.(pref.ExtensionTypeDescriptor); ok { - desc = xt.Descriptor() - } - return extensionType{extensionTypeDescriptor{desc}} -} - -func (xt extensionType) New() pref.Value { - switch { - case xt.desc.IsMap(): - return pref.ValueOfMap(&dynamicMap{ - desc: xt.desc, - mapv: make(map[interface{}]pref.Value), - }) - case xt.desc.IsList(): - return pref.ValueOfList(&dynamicList{desc: xt.desc}) - case xt.desc.Message() != nil: - return pref.ValueOfMessage(NewMessage(xt.desc.Message())) - default: - return xt.desc.Default() - } -} - -func (xt extensionType) Zero() pref.Value { - switch { - case xt.desc.IsMap(): - return pref.ValueOfMap(&dynamicMap{desc: xt.desc}) - case xt.desc.Cardinality() == pref.Repeated: - return pref.ValueOfList(emptyList{desc: xt.desc}) - case xt.desc.Message() != nil: - return pref.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}}) - default: - return xt.desc.Default() - } -} - -func (xt extensionType) TypeDescriptor() pref.ExtensionTypeDescriptor { - return xt.desc -} - -func (xt extensionType) ValueOf(iv interface{}) pref.Value { - v := pref.ValueOf(iv) - typecheck(xt.desc, v) - return v -} - -func (xt extensionType) InterfaceOf(v pref.Value) interface{} { - typecheck(xt.desc, v) - return v.Interface() -} - -func (xt extensionType) IsValidInterface(iv interface{}) bool { - return typeIsValid(xt.desc, pref.ValueOf(iv)) == nil -} - -func (xt extensionType) IsValidValue(v pref.Value) bool { - return typeIsValid(xt.desc, v) == nil -} - -type extensionTypeDescriptor struct { - pref.ExtensionDescriptor -} - -func (xt extensionTypeDescriptor) Type() pref.ExtensionType { - return extensionType{xt} -} - -func (xt extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { - return xt.ExtensionDescriptor -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 82a473e265..8c10797b90 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -166,10 +166,13 @@ import ( // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } // ... // foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// if err := any.UnmarshalTo(foo); err != nil { // ... 
// } // @@ -420,14 +423,15 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index f7a1109940..a583ca2f6c 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -303,16 +303,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, - 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 
0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go deleted file mode 100644 index 32a583df54..0000000000 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ /dev/null @@ -1,168 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/empty.proto - -package emptypb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. 
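The deleted emptypb doc above describes Empty as a placeholder for methods that take or return no payload. A minimal sketch of that usage pattern (Ping is a hypothetical handler, not part of any generated API):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/protobuf/types/known/emptypb"
)

// Ping is a hypothetical handler in the shape the Empty doc describes:
// no request payload and no response payload beyond success or failure.
func Ping(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

func main() {
	_, err := Ping(context.Background(), &emptypb.Empty{})
	fmt.Println("ping error:", err) // <nil>
}
```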
-type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. -func (*Empty) Descriptor() ([]byte, []int) { - return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} -} - -var File_google_protobuf_empty_proto protoreflect.FileDescriptor - -var file_google_protobuf_empty_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, - 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, - 0x65, 0x6d, 0x70, 0x74, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_empty_proto_rawDescOnce sync.Once - file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc -) - -func file_google_protobuf_empty_proto_rawDescGZIP() []byte { - file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) - }) - return file_google_protobuf_empty_proto_rawDescData -} - -var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ - (*Empty)(nil), // 0: google.protobuf.Empty -} -var file_google_protobuf_empty_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_empty_proto_init() } -func file_google_protobuf_empty_proto_init() { - if File_google_protobuf_empty_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: 
- return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_empty_proto_goTypes, - DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, - MessageInfos: file_google_protobuf_empty_proto_msgTypes, - }.Build() - File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil - file_google_protobuf_empty_proto_goTypes = nil - file_google_protobuf_empty_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 6a8d872c08..7f94443d26 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -393,9 +393,12 @@ func numValidPaths(m proto.Message, paths []string) int { // Identify the next message to search within. md = fd.Message() // may be nil - if fd.IsMap() { - md = fd.MapValue().Message() // may be nil + + // Repeated fields are only allowed at the last postion. + if fd.IsList() || fd.IsMap() { + md = nil } + return true }) { return i @@ -512,16 +515,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{ 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x8c, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x3b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git 
a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go deleted file mode 100644 index 7433a4c41c..0000000000 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ /dev/null @@ -1,810 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/struct.proto - -// Package structpb contains generated types for google/protobuf/struct.proto. -// -// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are -// used to represent arbitrary JSON. The Value message represents a JSON value, -// the Struct message represents a JSON object, and the ListValue message -// represents a JSON array. See https://json.org for more information. -// -// The Value, Struct, and ListValue types have generated MarshalJSON and -// UnmarshalJSON methods such that they serialize JSON equivalent to what the -// messages themselves represent. Use of these types with the -// "google.golang.org/protobuf/encoding/protojson" package -// ensures that they will be serialized as their JSON equivalent. -// -// -// Conversion to and from a Go interface -// -// The standard Go "encoding/json" package has functionality to serialize -// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and -// ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. -// This form can be used with other packages that operate on such data structures -// and also directly with the standard json package. -// -// In order to convert the interface{}, map[string]interface{}, and []interface{} -// forms back as Value, Struct, and ListValue messages, use the NewStruct, -// NewList, and NewValue constructor functions. 
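Although this vendored structpb file is deleted here, the conversion helpers its doc comment describes (NewStruct/NewValue one way, AsMap/AsInterface/AsSlice the other) remain available upstream. A compact round-trip sketch to complement the larger NewValue example that follows in the deleted comment:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Plain Go map -> Struct (keys must be valid UTF-8).
	s, err := structpb.NewStruct(map[string]interface{}{
		"name":    "example",
		"enabled": true,
		"count":   3, // numbers are stored as float64 (JSON numbers)
	})
	if err != nil {
		panic(err)
	}

	// MarshalJSON is backed by protojson, so the output is the JSON equivalent.
	b, err := s.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// ...and back to a general-purpose Go map.
	fmt.Println(s.AsMap()["count"]) // 3, as a float64
}
```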
-// -// -// Example usage -// -// Consider the following example JSON object: -// -// { -// "firstName": "John", -// "lastName": "Smith", -// "isAlive": true, -// "age": 27, -// "address": { -// "streetAddress": "21 2nd Street", -// "city": "New York", -// "state": "NY", -// "postalCode": "10021-3100" -// }, -// "phoneNumbers": [ -// { -// "type": "home", -// "number": "212 555-1234" -// }, -// { -// "type": "office", -// "number": "646 555-4567" -// } -// ], -// "children": [], -// "spouse": null -// } -// -// To construct a Value message representing the above JSON object: -// -// m, err := structpb.NewValue(map[string]interface{}{ -// "firstName": "John", -// "lastName": "Smith", -// "isAlive": true, -// "age": 27, -// "address": map[string]interface{}{ -// "streetAddress": "21 2nd Street", -// "city": "New York", -// "state": "NY", -// "postalCode": "10021-3100", -// }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ -// "type": "home", -// "number": "212 555-1234", -// }, -// map[string]interface{}{ -// "type": "office", -// "number": "646 555-4567", -// }, -// }, -// "children": []interface{}{}, -// "spouse": nil, -// }) -// if err != nil { -// ... // handle error -// } -// ... // make use of m as a *structpb.Value -// -package structpb - -import ( - base64 "encoding/base64" - protojson "google.golang.org/protobuf/encoding/protojson" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - math "math" - reflect "reflect" - sync "sync" - utf8 "unicode/utf8" -) - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -type NullValue int32 - -const ( - // Null value. - NullValue_NULL_VALUE NullValue = 0 -) - -// Enum value maps for NullValue. -var ( - NullValue_name = map[int32]string{ - 0: "NULL_VALUE", - } - NullValue_value = map[string]int32{ - "NULL_VALUE": 0, - } -) - -func (x NullValue) Enum() *NullValue { - p := new(NullValue) - *p = x - return p -} - -func (x NullValue) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NullValue) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() -} - -func (NullValue) Type() protoreflect.EnumType { - return &file_google_protobuf_struct_proto_enumTypes[0] -} - -func (x NullValue) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NullValue.Descriptor instead. -func (NullValue) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} -} - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -type Struct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Unordered map of dynamically typed values. 
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -// NewStruct constructs a Struct from a general-purpose Go map. -// The map keys must be valid UTF-8. -// The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { - x := &Struct{Fields: make(map[string]*Value, len(v))} - for k, v := range v { - if !utf8.ValidString(k) { - return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) - } - var err error - x.Fields[k], err = NewValue(v) - if err != nil { - return nil, err - } - } - return x, nil -} - -// AsMap converts x to a general-purpose Go map. -// The map values are converted by calling Value.AsInterface. -func (x *Struct) AsMap() map[string]interface{} { - vs := make(map[string]interface{}) - for k, v := range x.GetFields() { - vs[k] = v.AsInterface() - } - return vs -} - -func (x *Struct) MarshalJSON() ([]byte, error) { - return protojson.Marshal(x) -} - -func (x *Struct) UnmarshalJSON(b []byte) error { - return protojson.Unmarshal(b, x) -} - -func (x *Struct) Reset() { - *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Struct) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Struct) ProtoMessage() {} - -func (x *Struct) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Struct.ProtoReflect.Descriptor instead. -func (*Struct) Descriptor() ([]byte, []int) { - return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} -} - -func (x *Struct) GetFields() map[string]*Value { - if x != nil { - return x.Fields - } - return nil -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The kind of value. - // - // Types that are assignable to Kind: - // *Value_NullValue - // *Value_NumberValue - // *Value_StringValue - // *Value_BoolValue - // *Value_StructValue - // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` -} - -// NewValue constructs a Value from a general-purpose Go interface. 
-// -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ -// -// When converting an int64 or uint64 to a NumberValue, numeric precision loss -// is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { - switch v := v.(type) { - case nil: - return NewNullValue(), nil - case bool: - return NewBoolValue(v), nil - case int: - return NewNumberValue(float64(v)), nil - case int32: - return NewNumberValue(float64(v)), nil - case int64: - return NewNumberValue(float64(v)), nil - case uint: - return NewNumberValue(float64(v)), nil - case uint32: - return NewNumberValue(float64(v)), nil - case uint64: - return NewNumberValue(float64(v)), nil - case float32: - return NewNumberValue(float64(v)), nil - case float64: - return NewNumberValue(float64(v)), nil - case string: - if !utf8.ValidString(v) { - return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) - } - return NewStringValue(v), nil - case []byte: - s := base64.StdEncoding.EncodeToString(v) - return NewStringValue(s), nil - case map[string]interface{}: - v2, err := NewStruct(v) - if err != nil { - return nil, err - } - return NewStructValue(v2), nil - case []interface{}: - v2, err := NewList(v) - if err != nil { - return nil, err - } - return NewListValue(v2), nil - default: - return nil, protoimpl.X.NewError("invalid type: %T", v) - } -} - -// NewNullValue constructs a new null Value. -func NewNullValue() *Value { - return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} -} - -// NewBoolValue constructs a new boolean Value. -func NewBoolValue(v bool) *Value { - return &Value{Kind: &Value_BoolValue{BoolValue: v}} -} - -// NewNumberValue constructs a new number Value. -func NewNumberValue(v float64) *Value { - return &Value{Kind: &Value_NumberValue{NumberValue: v}} -} - -// NewStringValue constructs a new string Value. -func NewStringValue(v string) *Value { - return &Value{Kind: &Value_StringValue{StringValue: v}} -} - -// NewStructValue constructs a new struct Value. -func NewStructValue(v *Struct) *Value { - return &Value{Kind: &Value_StructValue{StructValue: v}} -} - -// NewListValue constructs a new list Value. -func NewListValue(v *ListValue) *Value { - return &Value{Kind: &Value_ListValue{ListValue: v}} -} - -// AsInterface converts x to a general-purpose Go interface. -// -// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce -// semantically equivalent JSON (assuming no errors occur). -// -// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are -// converted as strings to remain compatible with MarshalJSON. 
-func (x *Value) AsInterface() interface{} { - switch v := x.GetKind().(type) { - case *Value_NumberValue: - if v != nil { - switch { - case math.IsNaN(v.NumberValue): - return "NaN" - case math.IsInf(v.NumberValue, +1): - return "Infinity" - case math.IsInf(v.NumberValue, -1): - return "-Infinity" - default: - return v.NumberValue - } - } - case *Value_StringValue: - if v != nil { - return v.StringValue - } - case *Value_BoolValue: - if v != nil { - return v.BoolValue - } - case *Value_StructValue: - if v != nil { - return v.StructValue.AsMap() - } - case *Value_ListValue: - if v != nil { - return v.ListValue.AsSlice() - } - } - return nil -} - -func (x *Value) MarshalJSON() ([]byte, error) { - return protojson.Marshal(x) -} - -func (x *Value) UnmarshalJSON(b []byte) error { - return protojson.Unmarshal(b, x) -} - -func (x *Value) Reset() { - *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Value) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Value) ProtoMessage() {} - -func (x *Value) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Value.ProtoReflect.Descriptor instead. -func (*Value) Descriptor() ([]byte, []int) { - return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} -} - -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (x *Value) GetNullValue() NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue - } - return NullValue_NULL_VALUE -} - -func (x *Value) GetNumberValue() float64 { - if x, ok := x.GetKind().(*Value_NumberValue); ok { - return x.NumberValue - } - return 0 -} - -func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue - } - return "" -} - -func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (x *Value) GetStructValue() *Struct { - if x, ok := x.GetKind().(*Value_StructValue); ok { - return x.StructValue - } - return nil -} - -func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue - } - return nil -} - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_NullValue struct { - // Represents a null value. - NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Value_NumberValue struct { - // Represents a double value. - NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` -} - -type Value_StringValue struct { - // Represents a string value. - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Value_BoolValue struct { - // Represents a boolean value. - BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Value_StructValue struct { - // Represents a structured value. 
- StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` -} - -type Value_ListValue struct { - // Represents a repeated `Value`. - ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` -} - -func (*Value_NullValue) isValue_Kind() {} - -func (*Value_NumberValue) isValue_Kind() {} - -func (*Value_StringValue) isValue_Kind() {} - -func (*Value_BoolValue) isValue_Kind() {} - -func (*Value_StructValue) isValue_Kind() {} - -func (*Value_ListValue) isValue_Kind() {} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` -} - -// NewList constructs a ListValue from a general-purpose Go slice. -// The slice elements are converted using NewValue. -func NewList(v []interface{}) (*ListValue, error) { - x := &ListValue{Values: make([]*Value, len(v))} - for i, v := range v { - var err error - x.Values[i], err = NewValue(v) - if err != nil { - return nil, err - } - } - return x, nil -} - -// AsSlice converts x to a general-purpose Go slice. -// The slice elements are converted by calling Value.AsInterface. -func (x *ListValue) AsSlice() []interface{} { - vs := make([]interface{}, len(x.GetValues())) - for i, v := range x.GetValues() { - vs[i] = v.AsInterface() - } - return vs -} - -func (x *ListValue) MarshalJSON() ([]byte, error) { - return protojson.Marshal(x) -} - -func (x *ListValue) UnmarshalJSON(b []byte) error { - return protojson.Unmarshal(b, x) -} - -func (x *ListValue) Reset() { - *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListValue) ProtoMessage() {} - -func (x *ListValue) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
-func (*ListValue) Descriptor() ([]byte, []int) { - return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} -} - -func (x *ListValue) GetValues() []*Value { - if x != nil { - return x.Values - } - return nil -} - -var File_google_protobuf_struct_proto protoreflect.FileDescriptor - -var file_google_protobuf_struct_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, - 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, - 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x12, 0x2e, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, - 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, - 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x81, 0x01, 0x0a, 0x13, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, - 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc -) - -func file_google_protobuf_struct_proto_rawDescGZIP() []byte { - file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) - }) - return file_google_protobuf_struct_proto_rawDescData -} - -var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ - (NullValue)(0), // 0: google.protobuf.NullValue - (*Struct)(nil), // 1: google.protobuf.Struct - (*Value)(nil), // 2: google.protobuf.Value - (*ListValue)(nil), // 3: google.protobuf.ListValue - nil, // 4: google.protobuf.Struct.FieldsEntry -} -var file_google_protobuf_struct_proto_depIdxs = []int32{ - 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry - 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue - 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct - 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue - 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value - 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_google_protobuf_struct_proto_init() } -func file_google_protobuf_struct_proto_init() { - if File_google_protobuf_struct_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Value_NullValue)(nil), - (*Value_NumberValue)(nil), - (*Value_StringValue)(nil), - (*Value_BoolValue)(nil), - (*Value_StructValue)(nil), - (*Value_ListValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_struct_proto_goTypes, - DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, - EnumInfos: file_google_protobuf_struct_proto_enumTypes, - MessageInfos: file_google_protobuf_struct_proto_msgTypes, - }.Build() - File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil - file_google_protobuf_struct_proto_goTypes = nil - file_google_protobuf_struct_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c25e4bd7d0..c9ae92132a 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -134,7 +134,16 @@ import ( // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // -// Example 5: Compute Timestamp from current time in Python. +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. 
// // timestamp = Timestamp() // timestamp.GetCurrentTime() @@ -306,15 +315,15 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 2355adf428..895a8049e2 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -580,15 +580,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x7c, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0xf8, 0x01, 0x01, 0xa2, - 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, - 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/.travis.yml b/vendor/gopkg.in/evanphx/json-patch.v4/.travis.yml deleted file mode 100644 index 50e4afd19a..0000000000 --- a/vendor/gopkg.in/evanphx/json-patch.v4/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go - -go: - - 1.14 - - 1.13 - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - - go get github.com/jessevdk/go-flags - -script: - - go get - - go test -cover ./... - - cd ./v5 - - go get - - go test -cover ./... - -notifications: - email: false diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE deleted file mode 100644 index df76d7d771..0000000000 --- a/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md deleted file mode 100644 index 121b039dba..0000000000 --- a/vendor/gopkg.in/evanphx/json-patch.v4/README.md +++ /dev/null @@ -1,298 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! - -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. - -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... - - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. 
- -The following is an example of creating a patch from two operations, and -applying it against a JSON document. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... - combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! 
- if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! -combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/errors.go b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go deleted file mode 100644 index 75304b4437..0000000000 --- a/vendor/gopkg.in/evanphx/json-patch.v4/errors.go +++ /dev/null @@ -1,38 +0,0 @@ -package jsonpatch - -import "fmt" - -// AccumulatedCopySizeError is an error type returned when the accumulated size -// increase caused by copy operations in a patch operation has exceeded the -// limit. -type AccumulatedCopySizeError struct { - limit int64 - accumulated int64 -} - -// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. -func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { - return &AccumulatedCopySizeError{limit: l, accumulated: a} -} - -// Error implements the error interface. -func (a *AccumulatedCopySizeError) Error() string { - return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) -} - -// ArraySizeError is an error type returned when the array size has exceeded -// the limit. -type ArraySizeError struct { - limit int - size int -} - -// NewArraySizeError returns an ArraySizeError. -func NewArraySizeError(l, s int) *ArraySizeError { - return &ArraySizeError{limit: l, size: s} -} - -// Error implements the error interface. 
-func (a *ArraySizeError) Error() string { - return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) -} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/merge.go b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go deleted file mode 100644 index 14e8bb5ce3..0000000000 --- a/vendor/gopkg.in/evanphx/json-patch.v4/merge.go +++ /dev/null @@ -1,386 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - pruneNulls(v) - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - newAry = append(newAry, v) - } - } - - *ary = newAry - - return ary -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") -var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. -func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. 
-func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, errBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, errBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, errBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, errBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// resemblesJSONArray indicates whether the byte-slice "appears" to be -// a JSON array or not. -// False-positives are possible, as this function does not check the internal -// structure of the array. It only checks that the outer syntax is present and -// correct. -func resemblesJSONArray(input []byte) bool { - input = bytes.TrimSpace(input) - - hasPrefix := bytes.HasPrefix(input, []byte("[")) - hasSuffix := bytes.HasSuffix(input, []byte("]")) - - return hasPrefix && hasSuffix -} - -// CreateMergePatch will return a merge patch document capable of converting -// the original document(s) to the modified document(s). -// The parameters can be bytes of either two JSON Documents, or two arrays of -// JSON documents. -// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalResemblesArray := resemblesJSONArray(originalJSON) - modifiedResemblesArray := resemblesJSONArray(modifiedJSON) - - // Do both byte-slices seem like JSON arrays? - if originalResemblesArray && modifiedResemblesArray { - return createArrayMergePatch(originalJSON, modifiedJSON) - } - - // Are both byte-slices are not arrays? Then they are likely JSON objects... - if !originalResemblesArray && !modifiedResemblesArray { - return createObjectMergePatch(originalJSON, modifiedJSON) - } - - // None of the above? Then return an error because of mismatched types. - return nil, errBadMergeTypes -} - -// createObjectMergePatch will return a merge-patch document capable of -// converting the original document to the modified document. 
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDoc := map[string]interface{}{} - modifiedDoc := map[string]interface{}{} - - err := json.Unmarshal(originalJSON, &originalDoc) - if err != nil { - return nil, errBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDoc) - if err != nil { - return nil, errBadJSONDoc - } - - dest, err := getDiff(originalDoc, modifiedDoc) - if err != nil { - return nil, err - } - - return json.Marshal(dest) -} - -// createArrayMergePatch will return an array of merge-patch documents capable -// of converting the original document to the modified document for each -// pair of JSON documents provided in the arrays. -// Arrays of mismatched sizes will result in an error. -func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDocs := []json.RawMessage{} - modifiedDocs := []json.RawMessage{} - - err := json.Unmarshal(originalJSON, &originalDocs) - if err != nil { - return nil, errBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDocs) - if err != nil { - return nil, errBadJSONDoc - } - - total := len(originalDocs) - if len(modifiedDocs) != total { - return nil, errBadJSONDoc - } - - result := []json.RawMessage{} - for i := 0; i < len(originalDocs); i++ { - original := originalDocs[i] - modified := modifiedDocs[i] - - patch, err := createObjectMergePatch(original, modified) - if err != nil { - return nil, err - } - - result = append(result, json.RawMessage(patch)) - } - - return json.Marshal(result) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - if len(bt) != len(at) { - return false - } - for key := range bt { - av, aOK := at[key] - bv, bOK := bt[key] - if aOK != bOK { - return false - } - if !matchesValue(av, bv) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go deleted file mode 100644 index f185a45b2c..0000000000 --- a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go +++ /dev/null @@ -1,784 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var ( - // SupportNegativeIndices decides whether to support non-standard practice of - // allowing negative indices to mean indices starting at the end of an array. - // Default to true. - SupportNegativeIndices bool = true - // AccumulatedCopySizeLimit limits the total size increase in bytes caused by - // "copy" operations in a patch. - AccumulatedCopySizeLimit int64 = 0 -) - -var ( - ErrTestFailed = errors.New("test failed") - ErrMissing = errors.New("missing value") - ErrUnknownType = errors.New("unknown object type") - ErrInvalid = errors.New("invalid state detected") - ErrInvalidIndex = errors.New("invalid index referenced") -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -// Operation is a single JSON-Patch step, such as a single 'add' operation. -type Operation map[string]*json.RawMessage - -// Patch is an ordered collection of Operations. 
-type Patch []Operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, ErrUnknownType - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func deepCopy(src *lazyNode) (*lazyNode, int, error) { - if src == nil { - return nil, 0, nil - } - a, err := src.MarshalJSON() - if err != nil { - return nil, 0, err - } - sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - if len(n.doc) != len(o.doc) { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if (v == nil) != (ov == nil) { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -// Kind reads the "op" field of the Operation. -func (o Operation) Kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -// Path reads the "path" field of the Operation. 
-func (o Operation) Path() (string, error) { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") -} - -// From reads the "from" field of the Operation. -func (o Operation) From() (string, error) { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") -} - -func (o Operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -// ValueInterface decodes the operation value into an interface. -func (o Operation) ValueInterface() (interface{}, error) { - if obj, ok := o["value"]; ok && obj != nil { - var v interface{} - - err := json.Unmarshal(*obj, &v) - - if err != nil { - return nil, err - } - - return v, nil - } - - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -// set should only be used to implement the "replace" operation, so "key" must -// be an already existing index in "d". 
-func (d *partialArray) set(key string, val *lazyNode) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - (*d)[idx] = val - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) - } - - sz := len(*d) + 1 - - ary := make([]*lazyNode, sz) - - cur := *d - - if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(ary) - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(cur) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.add(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) - } - - return nil -} - -func (p Patch) remove(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) replace(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := con.get(key) - if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) - } - - err = con.set(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) move(doc *container, op Operation) 
error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) - } - - err = con.add(key, val) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) - } - - return nil -} - -func (p Patch) test(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) - } - - if val == nil { - if op.value().raw == nil { - return nil - } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) - } - - valCopy, sz, err := deepCopy(val) - if err != nil { - return errors.Wrapf(err, "error while performing deep copy") - } - - (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) - } - - err = con.add(key, valCopy) - if err != nil { - return errors.Wrapf(err, "error while adding value during copy") - } - - return nil -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. 
-func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - var accumulatedCopySize int64 - - for _, op := range p { - switch op.Kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) - default: - err = fmt.Errorf("Unexpected kind: %s", op.Kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/vendor/k8s.io/api/admission/v1/generated.pb.go b/vendor/k8s.io/api/admission/v1/generated.pb.go index 04eb206750..f2db634b86 100644 --- a/vendor/k8s.io/api/admission/v1/generated.pb.go +++ b/vendor/k8s.io/api/admission/v1/generated.pb.go @@ -1196,10 +1196,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1514,7 +1511,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1563,10 +1560,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1688,10 +1682,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto index 079ab09f21..5fc0e342e8 100644 --- a/vendor/k8s.io/api/admission/v1/generated.proto +++ b/vendor/k8s.io/api/admission/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admission.v1; diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index adc47be7fa..bce77c2a1e 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -1858,10 +1858,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1978,10 +1975,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2098,10 +2092,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2280,10 +2271,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2398,10 +2386,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2568,10 +2553,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2943,10 +2925,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3063,10 +3042,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3183,10 +3159,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3339,10 +3312,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index 
7f9772e7de..16ab9d5d67 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admissionregistration.v1; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index c98aa7477b..d5e802f3b3 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -1859,10 +1859,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1979,10 +1976,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2099,10 +2093,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2281,10 +2272,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2399,10 +2387,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2569,10 +2554,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2944,10 +2926,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3064,10 +3043,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3184,10 +3160,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3340,10 +3313,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 70ffa92196..bdae740376 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admissionregistration.v1beta1; diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/doc.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go similarity index 65% rename from vendor/knative.dev/pkg/apis/duck/v1alpha1/doc.go rename to vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go index 3638eb7a30..a4da95d44d 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/doc.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Knative Authors +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Api versions allow the api contract for a resource to be changed while keeping -// backward compatibility by support multiple concurrent versions -// of the same resource - // +k8s:deepcopy-gen=package -// +groupName=duck.knative.dev -package v1alpha1 +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=internal.apiserver.k8s.io + +// Package v1alpha1 contains the v1alpha1 version of the API used by the +// apiservers themselves. +package v1alpha1 // import "k8s.io/api/apiserverinternal/v1alpha1" diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..ee12c7d0dc --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go @@ -0,0 +1,1700 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} } +func (*ServerStorageVersion) ProtoMessage() {} +func (*ServerStorageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{0} +} +func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerStorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerStorageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerStorageVersion.Merge(m, src) +} +func (m *ServerStorageVersion) XXX_Size() int { + return m.Size() +} +func (m *ServerStorageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ServerStorageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo + +func (m *StorageVersion) Reset() { *m = StorageVersion{} } +func (*StorageVersion) ProtoMessage() {} +func (*StorageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{1} +} +func (m *StorageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersion.Merge(m, src) +} +func (m *StorageVersion) XXX_Size() int { + return m.Size() +} +func (m *StorageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersion proto.InternalMessageInfo + +func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} } +func (*StorageVersionCondition) ProtoMessage() {} +func (*StorageVersionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{2} +} +func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionCondition.Merge(m, src) +} +func (m *StorageVersionCondition) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo + +func (m *StorageVersionList) Reset() { *m = StorageVersionList{} } +func (*StorageVersionList) ProtoMessage() {} +func (*StorageVersionList) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{3} +} +func (m *StorageVersionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionList.Merge(m, src) +} +func (m 
*StorageVersionList) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo + +func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} } +func (*StorageVersionSpec) ProtoMessage() {} +func (*StorageVersionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{4} +} +func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionSpec.Merge(m, src) +} +func (m *StorageVersionSpec) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo + +func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} } +func (*StorageVersionStatus) ProtoMessage() {} +func (*StorageVersionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{5} +} +func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionStatus.Merge(m, src) +} +func (m *StorageVersionStatus) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ServerStorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.ServerStorageVersion") + proto.RegisterType((*StorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersion") + proto.RegisterType((*StorageVersionCondition)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionCondition") + proto.RegisterType((*StorageVersionList)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionList") + proto.RegisterType((*StorageVersionSpec)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionSpec") + proto.RegisterType((*StorageVersionStatus)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_a3903ff5e3cc7a03) +} + +var fileDescriptor_a3903ff5e3cc7a03 = []byte{ + // 763 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 0xd2, 0x52, 0x60, 0xaa, 0x54, 0x46, 0x08, 0xb5, 0x26, 0x5b, 0x6c, 0xa2, 0x41, 0x8d, + 0xbb, 0xd2, 0x88, 0x91, 0x98, 0x68, 0x58, 0x20, 0x06, 0x03, 0x62, 0x06, 0xe2, 0x01, 0x3d, 0x38, + 0xdd, 0x1d, 0xb7, 0x6b, 0xbb, 0x3b, 0x9b, 0x9d, 0x69, 0x13, 0x2e, 0xc6, 0x8f, 0xe0, 0x07, 0xf1, + 0xe8, 0x87, 0xe0, 0x64, 0xb8, 0x98, 0x90, 0x98, 0x34, 0xb2, 0x7e, 0x0b, 0x4e, 0x66, 0x66, 0x77, + 0x5b, 0xb6, 0x2d, 0xb1, 0xe1, 0xb0, 0xc9, 0xce, 0x7b, 0xef, 0xf7, 0x7b, 0x7f, 0xe6, 
0x37, 0x0f, + 0xbc, 0x69, 0x3e, 0x63, 0x9a, 0x43, 0xf5, 0x66, 0xbb, 0x4e, 0x02, 0x8f, 0x70, 0xc2, 0xf4, 0x0e, + 0xf1, 0x2c, 0x1a, 0xe8, 0xb1, 0x03, 0xfb, 0x8e, 0xf8, 0x18, 0x09, 0x3a, 0x24, 0x70, 0x3c, 0x4e, + 0x02, 0x0f, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0xf9, 0x0d, 0xbc, 0xa2, 0xdb, 0xc4, 0x23, 0x01, 0xe6, + 0xc4, 0xd2, 0xfc, 0x80, 0x72, 0x0a, 0xef, 0x46, 0x30, 0x0d, 0xfb, 0x8e, 0x36, 0x04, 0xd3, 0x12, + 0x58, 0xf9, 0x91, 0xed, 0xf0, 0x46, 0xbb, 0xae, 0x99, 0xd4, 0xd5, 0x6d, 0x6a, 0x53, 0x5d, 0xa2, + 0xeb, 0xed, 0x4f, 0xf2, 0x24, 0x0f, 0xf2, 0x2f, 0x62, 0x2d, 0x3f, 0xe9, 0x17, 0xe3, 0x62, 0xb3, + 0xe1, 0x78, 0x24, 0x38, 0xd2, 0xfd, 0xa6, 0x2d, 0x2b, 0xd3, 0x5d, 0xc2, 0xb1, 0xde, 0x19, 0xaa, + 0xa5, 0xac, 0x5f, 0x86, 0x0a, 0xda, 0x1e, 0x77, 0x5c, 0x32, 0x04, 0x78, 0xfa, 0x3f, 0x00, 0x33, + 0x1b, 0xc4, 0xc5, 0x83, 0xb8, 0xea, 0x2f, 0x05, 0xcc, 0xef, 0xcb, 0x4e, 0xf7, 0x39, 0x0d, 0xb0, + 0x4d, 0xde, 0x91, 0x80, 0x39, 0xd4, 0x83, 0xab, 0xa0, 0x80, 0x7d, 0x27, 0x72, 0x6d, 0x6f, 0x96, + 0x94, 0x25, 0x65, 0x79, 0xc6, 0xb8, 0x79, 0xdc, 0xad, 0x64, 0xc2, 0x6e, 0xa5, 0xb0, 0xfe, 0x76, + 0x3b, 0x71, 0xa1, 0x8b, 0x71, 0x70, 0x1d, 0x14, 0x89, 0x67, 0x52, 0xcb, 0xf1, 0xec, 0x98, 0xa9, + 0x34, 0x21, 0xa1, 0x8b, 0x31, 0xb4, 0xb8, 0x95, 0x76, 0xa3, 0xc1, 0x78, 0xb8, 0x01, 0xe6, 0x2c, + 0x62, 0x52, 0x0b, 0xd7, 0x5b, 0x49, 0x35, 0xac, 0x94, 0x5d, 0xca, 0x2e, 0xcf, 0x18, 0x0b, 0x61, + 0xb7, 0x32, 0xb7, 0x39, 0xe8, 0x44, 0xc3, 0xf1, 0xd5, 0x1f, 0x13, 0x60, 0x76, 0xa0, 0xa3, 0x8f, + 0x60, 0x5a, 0x8c, 0xdb, 0xc2, 0x1c, 0xcb, 0x76, 0x0a, 0xb5, 0xc7, 0x5a, 0xff, 0xca, 0x7b, 0x53, + 0xd3, 0xfc, 0xa6, 0x2d, 0xef, 0x5f, 0x13, 0xd1, 0x5a, 0x67, 0x45, 0xdb, 0xab, 0x7f, 0x26, 0x26, + 0xdf, 0x25, 0x1c, 0x1b, 0x30, 0xee, 0x02, 0xf4, 0x6d, 0xa8, 0xc7, 0x0a, 0xdf, 0x83, 0x1c, 0xf3, + 0x89, 0x29, 0x3b, 0x2e, 0xd4, 0xd6, 0xb4, 0xb1, 0x04, 0xa5, 0xa5, 0xcb, 0xdc, 0xf7, 0x89, 0x69, + 0x5c, 0x8b, 0xd3, 0xe4, 0xc4, 0x09, 0x49, 0x52, 0x68, 0x82, 0x3c, 0xe3, 0x98, 0xb7, 0xc5, 0x2c, + 0x04, 0xfd, 0xf3, 0xab, 0xd1, 0x4b, 0x0a, 0x63, 0x36, 0x4e, 0x90, 0x8f, 0xce, 0x28, 0xa6, 0xae, + 0x7e, 0xcf, 0x82, 0xc5, 0x34, 0x60, 0x83, 0x7a, 0x96, 0xc3, 0xc5, 0xfc, 0x5e, 0x82, 0x1c, 0x3f, + 0xf2, 0x49, 0x2c, 0x85, 0x87, 0x49, 0x89, 0x07, 0x47, 0x3e, 0x39, 0xef, 0x56, 0x6e, 0x5f, 0x02, + 0x13, 0x6e, 0x24, 0x81, 0x70, 0xad, 0xd7, 0x41, 0x24, 0x89, 0x3b, 0xe9, 0x22, 0xce, 0xbb, 0x95, + 0x62, 0x0f, 0x96, 0xae, 0x0b, 0xbe, 0x06, 0x90, 0xd6, 0x65, 0x87, 0xd6, 0xab, 0x48, 0xc1, 0x42, + 0x59, 0x62, 0x10, 0x59, 0xa3, 0x1c, 0xd3, 0xc0, 0xbd, 0xa1, 0x08, 0x34, 0x02, 0x05, 0x3b, 0x00, + 0xb6, 0x30, 0xe3, 0x07, 0x01, 0xf6, 0x58, 0x54, 0xa2, 0xe3, 0x92, 0x52, 0x4e, 0x0e, 0xf5, 0xc1, + 0x78, 0x8a, 0x10, 0x88, 0x7e, 0xde, 0x9d, 0x21, 0x36, 0x34, 0x22, 0x03, 0xbc, 0x07, 0xf2, 0x01, + 0xc1, 0x8c, 0x7a, 0xa5, 0x49, 0xd9, 0x7e, 0xef, 0x0e, 0x90, 0xb4, 0xa2, 0xd8, 0x0b, 0xef, 0x83, + 0x29, 0x97, 0x30, 0x86, 0x6d, 0x52, 0xca, 0xcb, 0xc0, 0x62, 0x1c, 0x38, 0xb5, 0x1b, 0x99, 0x51, + 0xe2, 0xaf, 0xfe, 0x54, 0x00, 0x4c, 0xcf, 0x7d, 0xc7, 0x61, 0x1c, 0x7e, 0x18, 0x52, 0xba, 0x36, + 0x5e, 0x5f, 0x02, 0x2d, 0x75, 0x7e, 0x23, 0x4e, 0x39, 0x9d, 0x58, 0x2e, 0xa8, 0xfc, 0x10, 0x4c, + 0x3a, 0x9c, 0xb8, 0xe2, 0x16, 0xb3, 0xcb, 0x85, 0xda, 0xea, 0x95, 0x74, 0x68, 0x5c, 0x8f, 0x33, + 0x4c, 0x6e, 0x0b, 0x2e, 0x14, 0x51, 0x56, 0xe7, 0x07, 0xfb, 0x11, 0x0f, 0xa0, 0xfa, 0x7b, 0x02, + 0xcc, 0x8f, 0x92, 0x31, 0xfc, 0x02, 0x8a, 0x2c, 0x65, 0x67, 0x25, 0x45, 0x16, 0x35, 0xf6, 0xe3, + 0x18, 0xb1, 0xfa, 0xfa, 0xab, 0x2a, 0x6d, 0x67, 0x68, 0x30, 0x19, 0xdc, 0x03, 0x0b, 0x26, 0x75, + 0x5d, 0xea, 
0x6d, 0x8d, 0xdc, 0x79, 0xb7, 0xc2, 0x6e, 0x65, 0x61, 0x63, 0x54, 0x00, 0x1a, 0x8d, + 0x83, 0x01, 0x00, 0x66, 0xf2, 0x04, 0xa2, 0xa5, 0x57, 0xa8, 0xbd, 0xb8, 0xd2, 0x80, 0x7b, 0x2f, + 0xa9, 0xbf, 0xb3, 0x7a, 0x26, 0x86, 0x2e, 0x64, 0x31, 0xb4, 0xe3, 0x33, 0x35, 0x73, 0x72, 0xa6, + 0x66, 0x4e, 0xcf, 0xd4, 0xcc, 0xd7, 0x50, 0x55, 0x8e, 0x43, 0x55, 0x39, 0x09, 0x55, 0xe5, 0x34, + 0x54, 0x95, 0x3f, 0xa1, 0xaa, 0x7c, 0xfb, 0xab, 0x66, 0x0e, 0xa7, 0x93, 0x3c, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xa1, 0x5f, 0xcf, 0x37, 0x78, 0x07, 0x00, 0x00, +} + +func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerStorageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerStorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DecodableVersions) > 0 { + for iNdEx := len(m.DecodableVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DecodableVersions[iNdEx]) + copy(dAtA[i:], m.DecodableVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DecodableVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.EncodingVersion) + copy(dAtA[i:], m.EncodingVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EncodingVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIServerID) + copy(dAtA[i:], m.APIServerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIServerID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := 
m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StorageVersionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.CommonEncodingVersion != nil { + i -= len(*m.CommonEncodingVersion) + copy(dAtA[i:], *m.CommonEncodingVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CommonEncodingVersion))) + i-- + dAtA[i] = 0x12 + } + if len(m.StorageVersions) > 0 { + for iNdEx := len(m.StorageVersions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StorageVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 
1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ServerStorageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIServerID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EncodingVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DecodableVersions) > 0 { + for _, s := range m.DecodableVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StorageVersionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StorageVersions) > 0 { + for _, e := range m.StorageVersions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.CommonEncodingVersion != nil { + l = len(*m.CommonEncodingVersion) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServerStorageVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerStorageVersion{`, + `APIServerID:` + fmt.Sprintf("%v", this.APIServerID) + `,`, + `EncodingVersion:` + fmt.Sprintf("%v", this.EncodingVersion) + `,`, + `DecodableVersions:` + fmt.Sprintf("%v", this.DecodableVersions) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersion{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionSpec", "StorageVersionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionStatus", "StorageVersionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + 
fmt.Sprintf("%v", this.Status) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageVersion{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersion", "StorageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageVersionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionSpec{`, + `}`, + }, "") + return s +} +func (this *StorageVersionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForStorageVersions := "[]ServerStorageVersion{" + for _, f := range this.StorageVersions { + repeatedStringForStorageVersions += strings.Replace(strings.Replace(f.String(), "ServerStorageVersion", "ServerStorageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForStorageVersions += "}" + repeatedStringForConditions := "[]StorageVersionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StorageVersionCondition", "StorageVersionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StorageVersionStatus{`, + `StorageVersions:` + repeatedStringForStorageVersions + `,`, + `CommonEncodingVersion:` + valueToStringGenerated(this.CommonEncodingVersion) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServerStorageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerStorageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerStorageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIServerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIServerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncodingVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EncodingVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DecodableVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DecodableVersions = append(m.DecodableVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StorageVersionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionList: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageVersion{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersions = append(m.StorageVersions, ServerStorageVersion{}) + if err := m.StorageVersions[len(m.StorageVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonEncodingVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CommonEncodingVersion = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StorageVersionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto new file mode 100644 index 0000000000..539c8c9e2b --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -0,0 +1,121 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apiserverinternal.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// An API server instance reports the version it can decode and the version it +// encodes objects to when persisting objects in the backend. +message ServerStorageVersion { + // The ID of the reporting API server. + optional string apiServerID = 1; + + // The API server encodes the object to this version when persisting it in + // the backend (e.g., etcd). + optional string encodingVersion = 2; + + // The API server can decode objects encoded in these versions. + // The encodingVersion must be included in the decodableVersions. + // +listType=set + repeated string decodableVersions = 3; +} + +// Storage version of a specific resource. +message StorageVersion { + // The name is .. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is an empty spec. It is here to comply with Kubernetes API style. + optional StorageVersionSpec spec = 2; + + // API server instances report the version they can decode and the version they + // encode objects to when persisting objects in the backend. + optional StorageVersionStatus status = 3; +} + +// Describes the state of the storageVersion at a certain point. +message StorageVersionCondition { + // Type of the condition. + // +required + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + // +required + optional string status = 2; + + // If set, this represents the .metadata.generation that the condition was set based upon. 
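
Every Unmarshal method in the generated Go above decodes tags, message lengths, and integer fields with the same base-128 varint loop (the shift/0x7F/0x80 pattern), and skipGenerated reuses it to step over unknown fields. A minimal standalone sketch of that pattern; the function and variable names are illustrative and not part of the vendored code:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop the generated Unmarshal functions use:
// each byte contributes its low 7 bits, least-significant group first,
// and a byte with the high bit clear (< 0x80) terminates the value.
func decodeVarint(b []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // same guard as ErrIntOverflowGenerated
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		value |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: (0xAC & 0x7F) | (0x02 << 7) = 44 + 256.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```
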
+ // +optional + optional int64 observedGeneration = 3; + + // Last time the condition transitioned from one status to another. + // +required + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // The reason for the condition's last transition. + // +required + optional string reason = 5; + + // A human readable message indicating details about the transition. + // +required + optional string message = 6; +} + +// A list of StorageVersions. +message StorageVersionList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StorageVersion items = 2; +} + +// StorageVersionSpec is an empty spec. +message StorageVersionSpec { +} + +// API server instances report the versions they can decode and the version they +// encode objects to when persisting objects in the backend. +message StorageVersionStatus { + // The reported versions per API server instance. + // +optional + // +listType=map + // +listMapKey=apiServerID + repeated ServerStorageVersion storageVersions = 1; + + // If all API server instances agree on the same encoding storage version, + // then this field is set to that version. Otherwise this field is left empty. + // API servers should finish updating its storageVersionStatus entry before + // serving write operations, so that this field will be in sync with the reality. + // +optional + optional string commonEncodingVersion = 2; + + // The latest available observations of the storageVersion's state. + // +optional + // +listType=map + // +listMapKey=type + repeated StorageVersionCondition conditions = 3; +} + diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go similarity index 73% rename from vendor/k8s.io/api/settings/v1alpha1/register.go rename to vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go index eee278d959..5e46e8d9a7 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "settings.k8s.io" +const GroupName = "internal.apiserver.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} @@ -34,18 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to the given scheme. 
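
The renamed register.go above changes GroupName to internal.apiserver.k8s.io and exposes the usual SchemeBuilder/AddToScheme helpers for the v1alpha1 types. A brief sketch of how those helpers are typically consumed, assuming the standard apimachinery scheme API; the resource name passed to Resource is illustrative:

```go
package main

import (
	"fmt"

	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the group's known types into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := apiserverinternalv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// SchemeGroupVersion carries the new group: internal.apiserver.k8s.io/v1alpha1.
	gvk := apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersion")
	fmt.Println(scheme.Recognizes(gvk)) // true

	// Resource qualifies an unversioned resource name with the group,
	// which is how API errors name the resource.
	fmt.Println(apiserverinternalv1alpha1.Resource("storageversions")) // storageversions.internal.apiserver.k8s.io
}
```
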
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &PodPreset{}, - &PodPresetList{}, + &StorageVersion{}, + &StorageVersionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go new file mode 100644 index 0000000000..880091b6f8 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Storage version of a specific resource. +type StorageVersion struct { + metav1.TypeMeta `json:",inline"` + // The name is .. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is an empty spec. It is here to comply with Kubernetes API style. + Spec StorageVersionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // API server instances report the version they can decode and the version they + // encode objects to when persisting objects in the backend. + Status StorageVersionStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// StorageVersionSpec is an empty spec. +type StorageVersionSpec struct{} + +// API server instances report the versions they can decode and the version they +// encode objects to when persisting objects in the backend. +type StorageVersionStatus struct { + // The reported versions per API server instance. + // +optional + // +listType=map + // +listMapKey=apiServerID + StorageVersions []ServerStorageVersion `json:"storageVersions,omitempty" protobuf:"bytes,1,opt,name=storageVersions"` + // If all API server instances agree on the same encoding storage version, + // then this field is set to that version. Otherwise this field is left empty. + // API servers should finish updating its storageVersionStatus entry before + // serving write operations, so that this field will be in sync with the reality. + // +optional + CommonEncodingVersion *string `json:"commonEncodingVersion,omitempty" protobuf:"bytes,2,opt,name=commonEncodingVersion"` + + // The latest available observations of the storageVersion's state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []StorageVersionCondition `json:"conditions,omitempty" protobuf:"bytes,3,opt,name=conditions"` +} + +// An API server instance reports the version it can decode and the version it +// encodes objects to when persisting objects in the backend. +type ServerStorageVersion struct { + // The ID of the reporting API server. + APIServerID string `json:"apiServerID,omitempty" protobuf:"bytes,1,opt,name=apiServerID"` + + // The API server encodes the object to this version when persisting it in + // the backend (e.g., etcd). 
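
The Go types above mirror the proto messages: StorageVersion is cluster-scoped, its Spec is intentionally empty, and Status carries the per-server report plus the agreed common encoding version. A short sketch of populating one; the object name and version strings are illustrative, and the <group>.<resource> naming is an assumption about the convention since the comment in the vendored file is truncated:

```go
package main

import (
	"fmt"

	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	encoding := "v1"
	sv := apiserverinternalv1alpha1.StorageVersion{
		ObjectMeta: metav1.ObjectMeta{
			// Conventionally <group>.<resource>; value here is illustrative.
			Name: "apps.deployments",
		},
		Status: apiserverinternalv1alpha1.StorageVersionStatus{
			StorageVersions: []apiserverinternalv1alpha1.ServerStorageVersion{
				{
					APIServerID:       "kube-apiserver-1",
					EncodingVersion:   "v1",
					DecodableVersions: []string{"v1", "v1beta1"},
				},
			},
			// Set only when every API server reports the same encoding version.
			CommonEncodingVersion: &encoding,
		},
	}
	fmt.Printf("%s -> %s\n", sv.Name, *sv.Status.CommonEncodingVersion)
}
```
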
+ EncodingVersion string `json:"encodingVersion,omitempty" protobuf:"bytes,2,opt,name=encodingVersion"` + + // The API server can decode objects encoded in these versions. + // The encodingVersion must be included in the decodableVersions. + // +listType=set + DecodableVersions []string `json:"decodableVersions,omitempty" protobuf:"bytes,3,opt,name=decodableVersions"` +} + +type StorageVersionConditionType string + +const ( + // Indicates that encoding storage versions reported by all servers are equal. + AllEncodingVersionsEqual StorageVersionConditionType = "AllEncodingVersionsEqual" +) + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// Describes the state of the storageVersion at a certain point. +type StorageVersionCondition struct { + // Type of the condition. + // +required + Type StorageVersionConditionType `json:"type" protobuf:"bytes,1,opt,name=type"` + // Status of the condition, one of True, False, Unknown. + // +required + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` + // If set, this represents the .metadata.generation that the condition was set based upon. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + // Last time the condition transitioned from one status to another. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +required + Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` + // A human readable message indicating details about the transition. + // +required + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of StorageVersions. +type StorageVersionList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..b05c285954 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ServerStorageVersion = map[string]string{ + "": "An API server instance reports the version it can decode and the version it encodes objects to when persisting objects in the backend.", + "apiServerID": "The ID of the reporting API server.", + "encodingVersion": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", + "decodableVersions": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", +} + +func (ServerStorageVersion) SwaggerDoc() map[string]string { + return map_ServerStorageVersion +} + +var map_StorageVersion = map[string]string{ + "": "\n Storage version of a specific resource.", + "metadata": "The name is ..", + "spec": "Spec is an empty spec. It is here to comply with Kubernetes API style.", + "status": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend.", +} + +func (StorageVersion) SwaggerDoc() map[string]string { + return map_StorageVersion +} + +var map_StorageVersionCondition = map[string]string{ + "": "Describes the state of the storageVersion at a certain point.", + "type": "Type of the condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "observedGeneration": "If set, this represents the .metadata.generation that the condition was set based upon.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StorageVersionCondition) SwaggerDoc() map[string]string { + return map_StorageVersionCondition +} + +var map_StorageVersionList = map[string]string{ + "": "A list of StorageVersions.", +} + +func (StorageVersionList) SwaggerDoc() map[string]string { + return map_StorageVersionList +} + +var map_StorageVersionSpec = map[string]string{ + "": "StorageVersionSpec is an empty spec.", +} + +func (StorageVersionSpec) SwaggerDoc() map[string]string { + return map_StorageVersionSpec +} + +var map_StorageVersionStatus = map[string]string{ + "": "API server instances report the versions they can decode and the version they encode objects to when persisting objects in the backend.", + "storageVersions": "The reported versions per API server instance.", + "commonEncodingVersion": "If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. 
API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality.", + "conditions": "The latest available observations of the storageVersion's state.", +} + +func (StorageVersionStatus) SwaggerDoc() map[string]string { + return map_StorageVersionStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..7e82a9070f --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,175 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) { + *out = *in + if in.DecodableVersions != nil { + in, out := &in.DecodableVersions, &out.DecodableVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStorageVersion. +func (in *ServerStorageVersion) DeepCopy() *ServerStorageVersion { + if in == nil { + return nil + } + out := new(ServerStorageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersion) DeepCopyInto(out *StorageVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersion. +func (in *StorageVersion) DeepCopy() *StorageVersion { + if in == nil { + return nil + } + out := new(StorageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionCondition) DeepCopyInto(out *StorageVersionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionCondition. 
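
The swagger doc maps earlier in this file are plain map[string]string lookups keyed by JSON field name, with the empty key holding the type's own description. A tiny usage sketch:

```go
package main

import (
	"fmt"

	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
)

func main() {
	// SwaggerDoc is generated on a value receiver, so it can be called
	// directly on a zero-value literal.
	docs := apiserverinternalv1alpha1.ServerStorageVersion{}.SwaggerDoc()
	fmt.Println(docs[""])                // type description
	fmt.Println(docs["encodingVersion"]) // per-field description
}
```
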
+func (in *StorageVersionCondition) DeepCopy() *StorageVersionCondition { + if in == nil { + return nil + } + out := new(StorageVersionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionList) DeepCopyInto(out *StorageVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionList. +func (in *StorageVersionList) DeepCopy() *StorageVersionList { + if in == nil { + return nil + } + out := new(StorageVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionSpec) DeepCopyInto(out *StorageVersionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionSpec. +func (in *StorageVersionSpec) DeepCopy() *StorageVersionSpec { + if in == nil { + return nil + } + out := new(StorageVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionStatus) DeepCopyInto(out *StorageVersionStatus) { + *out = *in + if in.StorageVersions != nil { + in, out := &in.StorageVersions, &out.StorageVersions + *out = make([]ServerStorageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CommonEncodingVersion != nil { + in, out := &in.CommonEncodingVersion, &out.CommonEncodingVersion + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StorageVersionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionStatus. 
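
The deepcopy functions above duplicate every slice and pointer field (DecodableVersions, StorageVersions, CommonEncodingVersion, Conditions) so a copy never aliases the original's backing storage. A small sketch demonstrating that for ServerStorageVersion, with illustrative values:

```go
package main

import (
	"fmt"

	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
)

func main() {
	orig := apiserverinternalv1alpha1.ServerStorageVersion{
		APIServerID:       "kube-apiserver-1",
		EncodingVersion:   "v1",
		DecodableVersions: []string{"v1"},
	}

	// DeepCopy duplicates the DecodableVersions backing array, so the
	// copy can be mutated without touching the original.
	cp := orig.DeepCopy()
	cp.DecodableVersions[0] = "v1beta1"

	fmt.Println(orig.DecodableVersions[0], cp.DecodableVersions[0]) // v1 v1beta1
}
```
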
+func (in *StorageVersionStatus) DeepCopy() *StorageVersionStatus { + if in == nil { + return nil + } + out := new(StorageVersionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 6ef25f50f9..5b7bda5009 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -3557,10 +3557,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3677,10 +3674,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3829,10 +3823,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4043,10 +4034,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4163,10 +4151,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4357,10 +4342,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4616,10 +4598,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4737,10 +4716,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4889,10 +4865,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5136,10 +5109,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5256,10 +5226,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - 
if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5510,10 +5477,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5731,10 +5695,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5852,10 +5813,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6004,10 +5962,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6218,10 +6173,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6338,10 +6290,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6499,10 +6448,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6681,10 +6627,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6770,10 +6713,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6895,10 +6835,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6968,10 +6905,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7120,10 +7054,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7334,10 +7265,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7454,10 +7382,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7747,10 +7672,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8013,10 +7935,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8134,10 +8053,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 6c55279745..3ee640462d 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
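
The hunks in this and the following vendored generated.pb.go files repeat one mechanical change from the regenerated code: two adjacent guards on the skipped-field length are folded into a single condition (and the map-entry paths that previously checked only skippy < 0 additionally gain the index-overflow guard). A small sketch, with names of my own rather than the vendored identifiers, showing that the folded form rejects exactly the same inputs as the two-step form:

```go
package main

import (
	"fmt"
	"math"
)

// oldCheck reproduces the two separate guards the generator used to emit.
func oldCheck(iNdEx, skippy int) bool {
	if skippy < 0 {
		return false
	}
	if (iNdEx + skippy) < 0 {
		return false
	}
	return true
}

// newCheck reproduces the single combined guard in the updated hunks.
func newCheck(iNdEx, skippy int) bool {
	return !((skippy < 0) || (iNdEx+skippy) < 0)
}

func main() {
	cases := [][2]int{
		{10, 5},          // normal skip
		{10, -1},         // negative length
		{math.MaxInt, 1}, // addition wraps negative (Go signed overflow is defined to wrap)
	}
	for _, c := range cases {
		fmt.Println(oldCheck(c[0], c[1]) == newCheck(c[0], c[1])) // true, three times
	}
}
```
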
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1; diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index f81b559013..6bc56e382a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -2734,10 +2734,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2854,10 +2851,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3006,10 +3000,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3253,10 +3244,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3373,10 +3361,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3568,7 +3553,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3618,10 +3603,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3908,10 +3890,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4129,10 +4108,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4250,10 +4226,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4322,10 +4295,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4447,10 
+4417,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4520,10 +4487,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4672,10 +4636,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4744,10 +4705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4926,7 +4884,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -4975,10 +4933,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5127,10 +5082,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5341,10 +5293,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5461,10 +5410,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5754,10 +5700,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6021,10 +5964,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6142,10 +6082,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git 
a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 694f6570d8..888f3e79e1 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1beta1; diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 8a9f20052b..09db97a8dc 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -3878,10 +3878,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3998,10 +3995,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4150,10 +4144,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4364,10 +4355,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4484,10 +4472,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4678,10 +4663,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4937,10 +4919,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5058,10 +5037,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5210,10 +5186,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5457,10 +5430,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5577,10 +5547,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5831,10 +5798,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6052,10 +6016,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6173,10 +6134,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6325,10 +6283,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6539,10 +6494,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6659,10 +6611,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6820,10 +6769,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7002,10 +6948,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7091,10 +7034,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7216,10 +7156,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7289,10 +7226,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7441,10 +7375,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7513,10 +7444,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7695,7 +7623,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7744,10 +7672,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7896,10 +7821,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8110,10 +8032,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8230,10 +8149,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8523,10 +8439,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8789,10 +8702,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8910,10 +8820,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 17e43970fa..1ea7e23a81 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1beta2; diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 6524f8ca96..896e0d3401 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -1264,10 +1264,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1349,10 +1346,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1501,10 +1495,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1642,10 +1633,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1760,10 +1748,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1912,10 +1897,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2029,10 +2011,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2199,10 +2178,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2460,7 +2436,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -2477,10 +2453,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index db7be173dd..2fb1243644 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the 
License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authentication.v1; diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 6c391dbfa3..3d8f765150 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -737,10 +737,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -889,10 +886,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1006,10 +1000,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1176,10 +1167,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1437,7 +1425,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1454,10 +1442,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index caf2a6a53a..d3bff49eb5 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authentication.v1beta1; diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index dbc0bdc71d..66c7c06ae7 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -1793,10 +1793,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1945,10 +1942,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2062,10 +2056,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2179,10 +2170,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2456,10 +2444,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2637,10 +2622,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2789,10 +2771,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2914,10 +2893,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3066,10 +3042,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3151,10 +3124,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3303,10 +3273,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3604,7 +3571,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3653,10 +3620,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3810,10 +3774,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3983,10 +3944,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index f68a04e49e..931b0f4995 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authorization.v1; diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 647c0c582b..4331d3e5b0 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -1793,10 +1793,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1945,10 +1942,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2062,10 +2056,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2179,10 +2170,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2456,10 +2444,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2637,10 +2622,7 
@@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2789,10 +2771,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2914,10 +2893,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3066,10 +3042,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3151,10 +3124,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3303,10 +3273,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3604,7 +3571,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3653,10 +3620,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3810,10 +3774,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3983,10 +3944,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 3876a3eeb9..a9e221608e 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authorization.v1beta1; diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 1e3d89076d..a6ff299d73 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{1} + return fileDescriptor_2bb1f2101a7f10e2, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } 
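The two ContainerResourceMetric* messages registered above get Marshal and Unmarshal bodies further down in this file, where hard-coded tag bytes such as 0xa, 0x10, 0x1a, 0x2a and 0x3a (the new containerResource field) appear. They follow directly from the protobuf key layout, key = (field_number << 3) | wire_type; the field numbers match the generated.proto near the end of this diff. A small illustrative calculation, not part of the diff:

package main

import "fmt"

// tag computes the single-byte protobuf key written by the generated marshallers.
func tag(fieldNumber, wireType int) int { return fieldNumber<<3 | wireType }

func main() {
	// Wire type 0 = varint, 2 = length-delimited (strings and embedded messages).
	fmt.Printf("name (field 1, string):                      0x%x\n", tag(1, 2)) // 0xa
	fmt.Printf("targetAverageUtilization (field 2, varint):  0x%x\n", tag(2, 0)) // 0x10
	fmt.Printf("targetAverageValue (field 3, message):       0x%x\n", tag(3, 2)) // 0x1a
	fmt.Printf("container (field 4 in the Status message):   0x%x\n", tag(4, 2)) // 0x22
	fmt.Printf("container (field 5 in the Source message):   0x%x\n", tag(5, 2)) // 0x2a
	fmt.Printf("containerResource (field 7 in MetricSpec):   0x%x\n", tag(7, 2)) // 0x3a
}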
func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{2} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{3} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{8} + return fileDescriptor_2bb1f2101a7f10e2, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{9} + return fileDescriptor_2bb1f2101a7f10e2, []int{11} } func (m *MetricStatus) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{10} + return fileDescriptor_2bb1f2101a7f10e2, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{11} + return fileDescriptor_2bb1f2101a7f10e2, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{12} + return fileDescriptor_2bb1f2101a7f10e2, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{13} + return fileDescriptor_2bb1f2101a7f10e2, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{14} + return fileDescriptor_2bb1f2101a7f10e2, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{15} + return fileDescriptor_2bb1f2101a7f10e2, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +554,7 @@ var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{16} + return fileDescriptor_2bb1f2101a7f10e2, []int{18} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +582,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{17} + return fileDescriptor_2bb1f2101a7f10e2, []int{19} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +610,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo 
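What this vendor bump adds to autoscaling/v1 is the ContainerResource metric type for the HorizontalPodAutoscaler; the field semantics are spelled out in the generated.proto comments near the end of this diff. The sketch below shows roughly how a client might populate the new field. It assumes the k8s.io/api and k8s.io/apimachinery packages vendored alongside this file; the field names (Name, TargetAverageUtilization, Container, ContainerResource) are taken from the generated code in this diff, but the MetricSourceType constant for the new source is not shown here, so a raw string literal stands in for it, and the container name is hypothetical. Per the proto comments, the type only takes effect when the HPAContainerMetrics feature gate is enabled.

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	target := int32(60) // scale when the named container averages 60% of its CPU request

	spec := autoscalingv1.MetricSpec{
		// The constant name for this source type is not visible in this diff,
		// so the raw string is used here.
		Type: "ContainerResource",
		ContainerResource: &autoscalingv1.ContainerResourceMetricSource{
			Name:                     corev1.ResourceCPU,
			TargetAverageUtilization: &target,
			Container:                "app", // hypothetical container name
		},
	}
	fmt.Printf("%+v\n", spec)
}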
func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{18} + return fileDescriptor_2bb1f2101a7f10e2, []int{20} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -580,6 +636,8 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") @@ -606,102 +664,206 @@ func init() { } var fileDescriptor_2bb1f2101a7f10e2 = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 
0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 
0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, + // 1605 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x70, 0xd3, 0xd6, + 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 0x9f, 0x0b, 0x0f, 0x4c, 0x78, 0x58, 
0x19, 0x3d, 0x86, + 0xc9, 0x7b, 0xaf, 0x48, 0x8d, 0x4b, 0x19, 0xba, 0x8c, 0xdc, 0x52, 0x98, 0xc6, 0x10, 0x6e, 0x02, + 0xa5, 0xbf, 0xc3, 0x8d, 0x7c, 0x71, 0x44, 0x2c, 0xc9, 0x23, 0xc9, 0x1e, 0xc2, 0x0c, 0x33, 0xed, + 0xa2, 0xfb, 0x6e, 0x68, 0xb7, 0xed, 0x4c, 0xb7, 0x5d, 0xb3, 0xee, 0x8e, 0x25, 0x0b, 0x66, 0xca, + 0xca, 0x53, 0xd4, 0x45, 0x17, 0x5d, 0x75, 0xcb, 0xaa, 0xa3, 0xab, 0x2b, 0x59, 0xb2, 0x2d, 0xc5, + 0x71, 0x42, 0xa6, 0xed, 0xb0, 0xb3, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x1c, 0x50, 0xb6, + 0x2f, 0xd9, 0x92, 0x66, 0xca, 0xdb, 0xad, 0x4d, 0x6a, 0x19, 0xd4, 0xa1, 0xb6, 0xdc, 0xa6, 0x46, + 0xcd, 0xb4, 0x64, 0x7e, 0x40, 0x9a, 0x9a, 0x4c, 0x5a, 0x8e, 0x69, 0xab, 0xa4, 0xa1, 0x19, 0x75, + 0xb9, 0xbd, 0x2c, 0xd7, 0xa9, 0x41, 0x2d, 0xe2, 0xd0, 0x9a, 0xd4, 0xb4, 0x4c, 0xc7, 0x44, 0xa7, + 0x7c, 0x52, 0x89, 0x34, 0x35, 0x29, 0x42, 0x2a, 0xb5, 0x97, 0x17, 0xce, 0xd7, 0x35, 0x67, 0xab, + 0xb5, 0x29, 0xa9, 0xa6, 0x2e, 0xd7, 0xcd, 0xba, 0x29, 0x33, 0x8e, 0xcd, 0xd6, 0x5d, 0xf6, 0xc5, + 0x3e, 0xd8, 0x2f, 0x1f, 0x69, 0x41, 0x8c, 0x08, 0x55, 0x4d, 0x8b, 0x0e, 0x90, 0xb6, 0x70, 0xa1, + 0x4b, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x23, 0x37, 0xb7, 0xeb, 0x8c, 0xc9, 0xa2, 0xb6, + 0xd9, 0xb2, 0x54, 0xba, 0x27, 0x2e, 0x5b, 0xd6, 0xa9, 0x43, 0x06, 0xc9, 0x92, 0x93, 0xb8, 0xac, + 0x96, 0xe1, 0x68, 0x7a, 0xbf, 0x98, 0x8b, 0xbb, 0x31, 0xd8, 0xea, 0x16, 0xd5, 0x49, 0x2f, 0x9f, + 0xf8, 0x5b, 0x16, 0xce, 0x54, 0x4c, 0xc3, 0x21, 0x1e, 0x07, 0xe6, 0x8f, 0xa8, 0x52, 0xc7, 0xd2, + 0xd4, 0x75, 0xf6, 0x1b, 0x55, 0x20, 0x6f, 0x10, 0x9d, 0x16, 0x33, 0x8b, 0x99, 0xa5, 0x29, 0x45, + 0x7e, 0xd2, 0x11, 0xc6, 0xdc, 0x8e, 0x90, 0xbf, 0x46, 0x74, 0xfa, 0xb2, 0x23, 0x08, 0xfd, 0x8a, + 0x93, 0x02, 0x18, 0x8f, 0x04, 0x33, 0x66, 0x74, 0x1b, 0x8a, 0x0e, 0xb1, 0xea, 0xd4, 0x59, 0x69, + 0x53, 0x8b, 0xd4, 0xe9, 0x4d, 0x47, 0x6b, 0x68, 0x0f, 0x88, 0xa3, 0x99, 0x46, 0x31, 0xbb, 0x98, + 0x59, 0x1a, 0x57, 0xfe, 0xed, 0x76, 0x84, 0xe2, 0x46, 0x02, 0x0d, 0x4e, 0xe4, 0x46, 0x6d, 0x40, + 0xb1, 0xb3, 0x5b, 0xa4, 0xd1, 0xa2, 0xc5, 0xdc, 0x62, 0x66, 0xa9, 0x50, 0x96, 0xa4, 0xae, 0x83, + 0x84, 0x5a, 0x91, 0x9a, 0xdb, 0x75, 0xe6, 0x31, 0x81, 0xc9, 0xa4, 0x1b, 0x2d, 0x62, 0x38, 0x9a, + 0xb3, 0xa3, 0x9c, 0x70, 0x3b, 0x02, 0xda, 0xe8, 0x43, 0xc3, 0x03, 0x24, 0x20, 0x19, 0xa6, 0xd4, + 0x40, 0x6f, 0xc5, 0x71, 0xa6, 0x9b, 0x79, 0xae, 0x9b, 0xa9, 0xae, 0x42, 0xbb, 0x34, 0xe2, 0x1f, + 0x29, 0x9a, 0x76, 0x88, 0xd3, 0xb2, 0x0f, 0x46, 0xd3, 0x9f, 0xc0, 0x29, 0xb5, 0x65, 0x59, 0xd4, + 0x48, 0x56, 0xf5, 0x19, 0xb7, 0x23, 0x9c, 0xaa, 0x24, 0x11, 0xe1, 0x64, 0x7e, 0xf4, 0x10, 0x8e, + 0xc5, 0x0f, 0xf7, 0xa3, 0xed, 0xd3, 0xfc, 0x81, 0xc7, 0x2a, 0xfd, 0x90, 0x78, 0x90, 0x9c, 0xb8, + 0xce, 0xf3, 0x43, 0xe8, 0xfc, 0x51, 0x06, 0x4e, 0x57, 0x2c, 0xd3, 0xb6, 0x6f, 0x51, 0xcb, 0xd6, + 0x4c, 0xe3, 0xfa, 0xe6, 0x3d, 0xaa, 0x3a, 0x98, 0xde, 0xa5, 0x16, 0x35, 0x54, 0x8a, 0x16, 0x21, + 0xbf, 0xad, 0x19, 0x35, 0xae, 0xf1, 0xe9, 0x40, 0xe3, 0x1f, 0x68, 0x46, 0x0d, 0xb3, 0x13, 0x8f, + 0x82, 0xd9, 0x24, 0x1b, 0xa7, 0x88, 0x28, 0xbc, 0x0c, 0x40, 0x9a, 0x1a, 0x17, 0xc0, 0x54, 0x31, + 0xa5, 0x20, 0x4e, 0x07, 0x2b, 0x6b, 0x57, 0xf9, 0x09, 0x8e, 0x50, 0x89, 0xdf, 0xe4, 0xe0, 0xf8, + 0x7b, 0xf7, 0x1d, 0x6a, 0x19, 0xa4, 0x11, 0x0b, 0xb6, 0x32, 0x80, 0xce, 0xbe, 0xaf, 0x75, 0x1d, + 0x21, 0x04, 0xab, 0x86, 0x27, 0x38, 0x42, 0x85, 0x4c, 0x98, 0xf1, 0xbf, 0xd6, 0x69, 0x83, 0xaa, + 0x8e, 0x69, 0xb1, 0xcb, 0x16, 0xca, 0x6f, 0xa5, 0xd9, 0xc3, 0x96, 0xbc, 0xd4, 0x23, 0xb5, 0x97, + 0xa5, 0x55, 0xb2, 0x49, 0x1b, 0x01, 0xab, 0x82, 0xdc, 0x8e, 0x30, 0x53, 0x8d, 0xc1, 0xe1, 0x1e, + 0x78, 
0x44, 0xa0, 0xe0, 0x07, 0xc4, 0x7e, 0xac, 0x3f, 0xeb, 0x76, 0x84, 0xc2, 0x46, 0x17, 0x06, + 0x47, 0x31, 0x13, 0xa2, 0x3a, 0xff, 0xaa, 0xa3, 0x5a, 0xfc, 0xae, 0xdf, 0x30, 0x7e, 0x6c, 0xfe, + 0x2d, 0x0c, 0xb3, 0x05, 0xd3, 0x3c, 0x6c, 0xf6, 0x63, 0x99, 0xe3, 0xfc, 0x59, 0xd3, 0x95, 0x08, + 0x16, 0x8e, 0x21, 0xa3, 0x9d, 0xc1, 0x89, 0x60, 0x34, 0x03, 0x9d, 0xdc, 0x4b, 0x12, 0x10, 0x1f, + 0x67, 0xe1, 0xe4, 0x15, 0xd3, 0xd2, 0x1e, 0x78, 0x51, 0xde, 0x58, 0x33, 0x6b, 0x2b, 0xbc, 0xf2, + 0x53, 0x0b, 0xdd, 0x81, 0x49, 0x4f, 0x7b, 0x35, 0xe2, 0x10, 0x66, 0xa3, 0x42, 0xf9, 0xcd, 0xe1, + 0x74, 0xed, 0x27, 0x86, 0x2a, 0x75, 0x48, 0xd7, 0xaa, 0xdd, 0xff, 0x70, 0x88, 0x8a, 0x6e, 0x43, + 0xde, 0x6e, 0x52, 0x95, 0x5b, 0xf2, 0xa2, 0x94, 0xd8, 0x81, 0x48, 0x09, 0x77, 0x5c, 0x6f, 0x52, + 0xb5, 0x9b, 0x47, 0xbc, 0x2f, 0xcc, 0x10, 0xd1, 0x1d, 0x98, 0xb0, 0x99, 0xaf, 0x71, 0xb3, 0x5d, + 0x1a, 0x01, 0x9b, 0xf1, 0x2b, 0x33, 0x1c, 0x7d, 0xc2, 0xff, 0xc6, 0x1c, 0x57, 0xfc, 0x2a, 0x07, + 0x8b, 0x09, 0x9c, 0x15, 0xd3, 0xa8, 0x69, 0x2c, 0xc5, 0x5f, 0x81, 0xbc, 0xb3, 0xd3, 0x0c, 0x5c, + 0xfc, 0x42, 0x70, 0xd1, 0x8d, 0x9d, 0xa6, 0x57, 0x84, 0xce, 0xee, 0xc6, 0xef, 0xd1, 0x61, 0x86, + 0x80, 0x56, 0xc3, 0x07, 0x65, 0x63, 0x58, 0xfc, 0x5a, 0x2f, 0x3b, 0xc2, 0x80, 0xae, 0x4b, 0x0a, + 0x91, 0xe2, 0x97, 0xf7, 0x32, 0x42, 0x83, 0xd8, 0xce, 0x86, 0x45, 0x0c, 0xdb, 0x97, 0xa4, 0xe9, + 0x81, 0x87, 0xff, 0x6f, 0x38, 0x23, 0x7b, 0x1c, 0xca, 0x02, 0xbf, 0x05, 0x5a, 0xed, 0x43, 0xc3, + 0x03, 0x24, 0xa0, 0x73, 0x30, 0x61, 0x51, 0x62, 0x9b, 0x06, 0x2f, 0x38, 0xa1, 0x72, 0x31, 0xfb, + 0x17, 0xf3, 0x53, 0xf4, 0x5f, 0x38, 0xa2, 0x53, 0xdb, 0x26, 0x75, 0xca, 0xbb, 0x81, 0x59, 0x4e, + 0x78, 0xa4, 0xea, 0xff, 0x8d, 0x83, 0x73, 0xf1, 0x59, 0x06, 0x4e, 0x27, 0xe8, 0x71, 0x55, 0xb3, + 0x1d, 0xf4, 0x69, 0x9f, 0x17, 0x4b, 0x43, 0x66, 0x0c, 0xcd, 0xf6, 0x7d, 0x78, 0x8e, 0xcb, 0x9e, + 0x0c, 0xfe, 0x89, 0x78, 0xf0, 0x87, 0x30, 0xae, 0x39, 0x54, 0xf7, 0xac, 0x92, 0x5b, 0x2a, 0x94, + 0xcb, 0x7b, 0x77, 0x33, 0xe5, 0x28, 0x87, 0x1f, 0xbf, 0xea, 0x01, 0x61, 0x1f, 0x4f, 0xfc, 0x3d, + 0x9b, 0xf8, 0x2c, 0xcf, 0xcd, 0x51, 0x1b, 0x66, 0xd8, 0x97, 0x9f, 0x8a, 0x31, 0xbd, 0xcb, 0x1f, + 0x97, 0x16, 0x44, 0x29, 0xc5, 0x5b, 0x39, 0xc1, 0x6f, 0x31, 0xb3, 0x1e, 0x43, 0xc5, 0x3d, 0x52, + 0xd0, 0x32, 0x14, 0x74, 0xcd, 0xc0, 0xb4, 0xd9, 0xd0, 0x54, 0x62, 0xf3, 0x1e, 0x88, 0x95, 0x9f, + 0x6a, 0xf7, 0x6f, 0x1c, 0xa5, 0x41, 0x6f, 0x43, 0x41, 0x27, 0xf7, 0x43, 0x96, 0x1c, 0x63, 0x39, + 0xc6, 0xe5, 0x15, 0xaa, 0xdd, 0x23, 0x1c, 0xa5, 0x43, 0xf7, 0xa0, 0xe4, 0xd7, 0x94, 0xca, 0xda, + 0xcd, 0x48, 0xdb, 0xb4, 0x46, 0x2d, 0x95, 0x1a, 0x8e, 0xe7, 0x1a, 0x79, 0x86, 0x24, 0xba, 0x1d, + 0xa1, 0xb4, 0x91, 0x4a, 0x89, 0x77, 0x41, 0x12, 0x7f, 0xca, 0xc1, 0x99, 0xd4, 0x34, 0x80, 0x2e, + 0x03, 0x32, 0x37, 0x6d, 0x6a, 0xb5, 0x69, 0xed, 0x7d, 0xbf, 0xeb, 0xf7, 0x1a, 0x14, 0x4f, 0xe7, + 0x39, 0xbf, 0x26, 0x5e, 0xef, 0x3b, 0xc5, 0x03, 0x38, 0x90, 0x0a, 0x47, 0xbd, 0xb8, 0xf0, 0xb5, + 0xac, 0xf1, 0x5e, 0x68, 0x6f, 0x41, 0x37, 0xef, 0x76, 0x84, 0xa3, 0xab, 0x51, 0x10, 0x1c, 0xc7, + 0x44, 0x2b, 0x30, 0xcb, 0x93, 0x7d, 0x8f, 0xd6, 0x4f, 0x72, 0xad, 0xcf, 0x56, 0xe2, 0xc7, 0xb8, + 0x97, 0xde, 0x83, 0xa8, 0x51, 0x5b, 0xb3, 0x68, 0x2d, 0x84, 0xc8, 0xc7, 0x21, 0xde, 0x8d, 0x1f, + 0xe3, 0x5e, 0x7a, 0xa4, 0x83, 0xc0, 0x51, 0x13, 0x2d, 0x38, 0xce, 0x20, 0xff, 0xe3, 0x76, 0x04, + 0xa1, 0x92, 0x4e, 0x8a, 0x77, 0xc3, 0x12, 0x1f, 0xe5, 0x81, 0xf7, 0x0e, 0x2c, 0x40, 0x2e, 0xc4, + 0x52, 0xef, 0x62, 0x4f, 0xea, 0x9d, 0x8b, 0x36, 0x8a, 0x91, 0x34, 0x7b, 0x03, 0x26, 0x4c, 0x16, + 0x19, 0xdc, 0x2e, 0xe7, 0x53, 
0xc2, 0x29, 0x2c, 0x69, 0x21, 0x90, 0x02, 0x5e, 0x2e, 0xe3, 0xa1, + 0xc5, 0x81, 0xd0, 0x55, 0xc8, 0x37, 0xcd, 0x5a, 0x50, 0x88, 0xfe, 0x9f, 0x02, 0xb8, 0x66, 0xd6, + 0xec, 0x18, 0xdc, 0xa4, 0x77, 0x63, 0xef, 0x5f, 0xcc, 0x20, 0xd0, 0x47, 0x30, 0x19, 0x14, 0x7c, + 0xde, 0x1d, 0xc8, 0x29, 0x70, 0x83, 0x06, 0x50, 0x65, 0xda, 0x4b, 0x64, 0xc1, 0x09, 0x0e, 0xe1, + 0xd0, 0x43, 0x98, 0x57, 0x7b, 0xe7, 0xa9, 0xe2, 0x91, 0x5d, 0x6b, 0x67, 0xea, 0xb4, 0xab, 0xfc, + 0xcb, 0xed, 0x08, 0xf3, 0x7d, 0x24, 0xb8, 0x5f, 0x92, 0xf7, 0x32, 0xca, 0x3b, 0x45, 0xe6, 0x14, + 0xe9, 0x2f, 0x1b, 0xd4, 0xed, 0xfb, 0x2f, 0x0b, 0x4e, 0x70, 0x08, 0x27, 0x7e, 0x9b, 0x87, 0xe9, + 0x58, 0xf7, 0x79, 0xc8, 0x9e, 0xe1, 0xb7, 0x11, 0x07, 0xe6, 0x19, 0x3e, 0xdc, 0x81, 0x7a, 0x86, + 0x0f, 0x79, 0x48, 0x9e, 0xe1, 0x0b, 0x3b, 0x24, 0xcf, 0x88, 0xbc, 0x6c, 0x80, 0x67, 0x3c, 0xcb, + 0x01, 0xea, 0x0f, 0x62, 0xf4, 0x39, 0x4c, 0xf8, 0xe5, 0x62, 0x9f, 0x25, 0x35, 0x6c, 0x6e, 0x78, + 0xf5, 0xe4, 0xa8, 0x3d, 0xd3, 0x4f, 0x76, 0xa8, 0xe9, 0x87, 0x1e, 0xc4, 0x94, 0x18, 0xd6, 0xdc, + 0xc4, 0x49, 0xf1, 0x33, 0x98, 0xb4, 0x83, 0xf1, 0x2a, 0x3f, 0xfa, 0x78, 0xc5, 0x14, 0x1e, 0x0e, + 0x56, 0x21, 0x24, 0xaa, 0xc1, 0x34, 0x89, 0x4e, 0x38, 0xe3, 0x23, 0x3d, 0x63, 0xce, 0x1b, 0xa7, + 0x62, 0xa3, 0x4d, 0x0c, 0x55, 0xfc, 0xb9, 0xd7, 0xac, 0x7e, 0xd8, 0xff, 0x15, 0xcd, 0x7a, 0x78, + 0x33, 0xe6, 0x3f, 0xc2, 0xb2, 0xdf, 0x67, 0x61, 0xae, 0xb7, 0x48, 0x8e, 0xb4, 0x4c, 0x78, 0x30, + 0x70, 0x23, 0x92, 0x1d, 0xe9, 0xd2, 0xe1, 0x0c, 0x34, 0xe4, 0xae, 0x33, 0x6a, 0x89, 0xdc, 0x81, + 0x5b, 0x42, 0xfc, 0x21, 0xae, 0xa3, 0xd1, 0x17, 0x2e, 0x09, 0xeb, 0xc9, 0xec, 0x21, 0xad, 0x27, + 0x5f, 0xb1, 0x9a, 0x7e, 0xcc, 0xc2, 0xf1, 0xd7, 0x1b, 0xfa, 0xe1, 0x77, 0x79, 0x8f, 0xfb, 0xf5, + 0xf5, 0x7a, 0xcf, 0x3e, 0xd4, 0x8a, 0xed, 0xcb, 0x2c, 0x8c, 0xb3, 0xd1, 0xec, 0x10, 0x16, 0x6a, + 0x97, 0x63, 0x0b, 0xb5, 0xb3, 0x29, 0x15, 0x8e, 0xdd, 0x28, 0x71, 0x7d, 0x76, 0xad, 0x67, 0x7d, + 0x76, 0x6e, 0x57, 0xa4, 0xf4, 0x65, 0xd9, 0x3b, 0x30, 0x15, 0x0a, 0x44, 0x6f, 0x78, 0xbd, 0x2a, + 0x9f, 0x29, 0x33, 0xcc, 0xb6, 0xe1, 0x86, 0x25, 0x1c, 0x26, 0x43, 0x0a, 0x51, 0x83, 0x42, 0x44, + 0xc2, 0xde, 0x98, 0x3d, 0x6a, 0x3b, 0xba, 0x2e, 0x9e, 0xea, 0x52, 0xf7, 0xe7, 0x04, 0x65, 0xe9, + 0xc9, 0x8b, 0xd2, 0xd8, 0xd3, 0x17, 0xa5, 0xb1, 0xe7, 0x2f, 0x4a, 0x63, 0x5f, 0xb8, 0xa5, 0xcc, + 0x13, 0xb7, 0x94, 0x79, 0xea, 0x96, 0x32, 0xcf, 0xdd, 0x52, 0xe6, 0x17, 0xb7, 0x94, 0xf9, 0xfa, + 0xd7, 0xd2, 0xd8, 0xc7, 0xd9, 0xf6, 0xf2, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x83, 0x99, + 0xe1, 0x70, 0x1d, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, 
uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1138,6 +1300,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1214,6 +1388,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1723,6 +1909,44 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -1896,6 +2120,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if 
m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1923,6 +2151,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2083,6 +2315,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2200,6 +2458,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2214,6 +2473,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2320,20 +2580,357 @@ func (this *ScaleStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + 
return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2466,10 +3063,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2659,10 +3253,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2849,10 +3440,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3001,10 +3589,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3215,10 +3800,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3335,10 +3917,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3480,10 +4059,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3647,10 +4223,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3870,16 +4443,49 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4099,16 +4705,49 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4328,10 +4967,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4551,10 +5187,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4705,10 +5338,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4859,10 +5489,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5000,10 +5627,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5138,10 +5762,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5290,10 +5911,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if 
err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5362,10 +5980,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5466,10 +6081,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index f50ed9d1f0..08b8667c8d 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v1; @@ -30,6 +30,60 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in the requests and limits, describing a single container in +// each of the pods of the current scale target(e.g. CPU or memory). The values will be +// averaged together before being compared to the target. Such metrics are built into +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + + // container is the name of the container in the pods of the scaling target. + optional string container = 5; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. 
It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + + // container is the name of the container in the pods of the scaling taget + optional string container = 4; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -184,8 +238,10 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -207,6 +263,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod of the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -218,8 +283,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -241,6 +308,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). 
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 55b2a0d6b6..3343177b2a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -165,6 +165,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -176,8 +182,10 @@ const ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -196,6 +204,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod of the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. 
It allows autoscaling based on information // coming from components running outside of cluster @@ -267,6 +283,30 @@ type ResourceMetricSource struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in the requests and limits, describing a single container in +// each of the pods of the current scale target(e.g. CPU or memory). The values will be +// averaged together before being compared to the target. Such metrics are built into +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. + Container string `json:"container" protobuf:"bytes,5,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -289,8 +329,10 @@ type ExternalMetricSource struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -309,6 +351,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. 
+ // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -414,6 +463,30 @@ type ResourceMetricStatus struct { CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 129ce2b484..192dc5f398 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -27,6 +27,30 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in the requests and limits, describing a single container in each of the pods of the current scale target(e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built into Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "container": "container is the name of the container in the pods of the scaling target.", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", + "container": "container is the name of the container in the pods of the scaling taget", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -122,12 +146,13 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -135,12 +160,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index ddb6011280..05ae6ebda7 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -25,6 +25,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -252,6 +300,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -288,6 +341,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index e129e41b8b..28832c152d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{1} + return fileDescriptor_26c1bfc7a52d0478, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{2} + return fileDescriptor_26c1bfc7a52d0478, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m 
*HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{3} + return fileDescriptor_26c1bfc7a52d0478, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{8} + return fileDescriptor_26c1bfc7a52d0478, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{9} + return fileDescriptor_26c1bfc7a52d0478, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{10} + return 
fileDescriptor_26c1bfc7a52d0478, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{11} + return fileDescriptor_26c1bfc7a52d0478, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{12} + return fileDescriptor_26c1bfc7a52d0478, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{13} + return fileDescriptor_26c1bfc7a52d0478, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{14} + return fileDescriptor_26c1bfc7a52d0478, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{15} + return fileDescriptor_26c1bfc7a52d0478, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,6 +552,8 @@ func (m *ResourceMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus") @@ -519,100 +577,203 @@ func init() { } var fileDescriptor_26c1bfc7a52d0478 = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 
0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 
0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 
0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, + // 1562 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x6c, 0x1b, 0xc5, + 0x1b, 0x8f, 0xed, 0xcd, 0xeb, 0x73, 0x9a, 0xc7, 0xb4, 0xff, 0xd6, 0x4d, 0xff, 0xb5, 0xa3, 0x15, + 0x42, 0xa1, 0xa2, 0xbb, 0xad, 0x09, 0x0f, 0x09, 0x21, 0x11, 0x1b, 0x68, 0x2b, 0x92, 0xb6, 0x4c, + 0xd2, 0x0a, 0x41, 0x8b, 0x98, 0xac, 0xa7, 0xce, 0x12, 0x7b, 0xd7, 0xda, 0x19, 0x5b, 0x4d, 0x11, + 0x12, 0x42, 0xe2, 0xce, 0x05, 0xce, 0x20, 0x71, 0x45, 0x88, 0x0b, 0x9c, 0xb9, 0xf5, 0xd8, 0x63, + 0x2b, 0x90, 0x45, 0xcd, 0x81, 0x33, 0xd7, 0x9e, 0xd0, 0xcc, 0xce, 0xae, 0x77, 0xfd, 0x88, 0x1d, + 0x37, 0x0d, 0x0f, 0xf5, 0xe6, 0xdd, 0xf9, 0xbe, 0xdf, 0x37, 0xf3, 0xfb, 0x5e, 0xf3, 0xad, 0xe1, + 0xc2, 0xce, 0x2b, 0xcc, 0xb0, 0x5d, 0x73, 0xa7, 0xbe, 0x45, 0x3d, 0x87, 0x72, 0xca, 0xcc, 0x06, + 0x75, 0x4a, 0xae, 0x67, 0xaa, 0x05, 0x52, 0xb3, 0x4d, 0x52, 0xe7, 0x2e, 0xb3, 0x48, 0xc5, 0x76, + 0xca, 0x66, 0x23, 0xbf, 0x45, 0x39, 0x39, 0x6f, 0x96, 0xa9, 0x43, 0x3d, 0xc2, 0x69, 0xc9, 0xa8, + 0x79, 0x2e, 0x77, 0x51, 0xd6, 0x97, 0x37, 0x48, 0xcd, 0x36, 0x22, 0xf2, 0x86, 0x92, 0x5f, 0x3c, + 0x5b, 0xb6, 0xf9, 0x76, 0x7d, 0xcb, 0xb0, 0xdc, 0xaa, 0x59, 0x76, 0xcb, 0xae, 0x29, 0xd5, 0xb6, + 0xea, 0xb7, 0xe4, 0x93, 0x7c, 0x90, 0xbf, 0x7c, 0xb8, 0x45, 0x3d, 0x62, 0xde, 0x72, 0x3d, 0x6a, + 0x36, 0xba, 0x4c, 0x2e, 0xae, 0xb4, 0x65, 0xaa, 0xc4, 0xda, 0xb6, 0x1d, 0xea, 0xed, 0x9a, 0xb5, + 0x9d, 0xb2, 0x54, 0xf2, 0x28, 0x73, 0xeb, 0x9e, 0x45, 0xf7, 0xa5, 0xc5, 0xcc, 0x2a, 0xe5, 0xa4, + 0x97, 0x2d, 0xb3, 0x9f, 0x96, 0x57, 0x77, 0xb8, 0x5d, 0xed, 0x36, 0xf3, 0xd2, 0x20, 0x05, 0x66, + 0x6d, 0xd3, 0x2a, 0xe9, 0xd4, 0xd3, 0xff, 0x48, 0xc2, 0xe9, 0xa2, 0xeb, 0x70, 0x22, 0x34, 0xb0, + 0x3a, 0xc4, 0x3a, 0xe5, 0x9e, 0x6d, 0x6d, 0xc8, 0xdf, 0xa8, 0x08, 0x9a, 0x43, 0xaa, 0x34, 0x93, + 0x58, 0x4a, 0x2c, 0x4f, 0x17, 0xcc, 0xbb, 0xcd, 0xdc, 0x58, 0xab, 0x99, 0xd3, 0x2e, 0x93, 0x2a, + 0x7d, 0xd4, 0xcc, 0xe5, 0xba, 0x89, 0x33, 0x02, 0x18, 
0x21, 0x82, 0xa5, 0x32, 0x7a, 0x17, 0x32, + 0x9c, 0x78, 0x65, 0xca, 0x57, 0x1b, 0xd4, 0x23, 0x65, 0x7a, 0x8d, 0xdb, 0x15, 0xfb, 0x0e, 0xe1, + 0xb6, 0xeb, 0x64, 0x92, 0x4b, 0x89, 0xe5, 0xf1, 0xc2, 0xff, 0x5b, 0xcd, 0x5c, 0x66, 0xb3, 0x8f, + 0x0c, 0xee, 0xab, 0x8d, 0x1a, 0x80, 0x62, 0x6b, 0xd7, 0x49, 0xa5, 0x4e, 0x33, 0xa9, 0xa5, 0xc4, + 0x72, 0x3a, 0x6f, 0x18, 0xed, 0x28, 0x09, 0x59, 0x31, 0x6a, 0x3b, 0x65, 0x19, 0x36, 0x81, 0xcb, + 0x8c, 0x77, 0xea, 0xc4, 0xe1, 0x36, 0xdf, 0x2d, 0x1c, 0x6f, 0x35, 0x73, 0x68, 0xb3, 0x0b, 0x0d, + 0xf7, 0xb0, 0x80, 0x4c, 0x98, 0xb6, 0x02, 0xde, 0x32, 0x9a, 0xe4, 0x66, 0x41, 0x71, 0x33, 0xdd, + 0x26, 0xb4, 0x2d, 0xa3, 0xff, 0xb9, 0x07, 0xd3, 0x9c, 0xf0, 0x3a, 0x3b, 0x18, 0xa6, 0xdf, 0x87, + 0x93, 0x56, 0xdd, 0xf3, 0xa8, 0xd3, 0x9f, 0xea, 0xd3, 0xad, 0x66, 0xee, 0x64, 0xb1, 0x9f, 0x10, + 0xee, 0xaf, 0x8f, 0x3e, 0x81, 0xa3, 0xf1, 0xc5, 0xc7, 0x61, 0xfb, 0x94, 0x3a, 0xe0, 0xd1, 0x62, + 0x37, 0x24, 0xee, 0x65, 0x67, 0xff, 0x9c, 0x7f, 0x99, 0x80, 0x53, 0x45, 0xcf, 0x65, 0xec, 0x3a, + 0xf5, 0x98, 0xed, 0x3a, 0x57, 0xb6, 0x3e, 0xa2, 0x16, 0xc7, 0xf4, 0x16, 0xf5, 0xa8, 0x63, 0x51, + 0xb4, 0x04, 0xda, 0x8e, 0xed, 0x94, 0x14, 0xe3, 0x33, 0x01, 0xe3, 0x6f, 0xdb, 0x4e, 0x09, 0xcb, + 0x15, 0x21, 0x21, 0x7d, 0x92, 0x8c, 0x4b, 0x44, 0x08, 0xcf, 0x03, 0x90, 0x9a, 0xad, 0x0c, 0x48, + 0x2a, 0xa6, 0x0b, 0x48, 0xc9, 0xc1, 0xea, 0xd5, 0x4b, 0x6a, 0x05, 0x47, 0xa4, 0xf4, 0xaf, 0x52, + 0x70, 0xec, 0xcd, 0xdb, 0x9c, 0x7a, 0x0e, 0xa9, 0xc4, 0x92, 0x2d, 0x0f, 0x50, 0x95, 0xcf, 0x97, + 0xdb, 0x81, 0x10, 0x82, 0xad, 0x87, 0x2b, 0x38, 0x22, 0x85, 0x5c, 0x98, 0xf5, 0x9f, 0x36, 0x68, + 0x85, 0x5a, 0xdc, 0xf5, 0xe4, 0x66, 0xd3, 0xf9, 0x17, 0xf6, 0xf2, 0x07, 0x33, 0x44, 0xe9, 0x31, + 0x1a, 0xe7, 0x8d, 0x35, 0xb2, 0x45, 0x2b, 0x81, 0x6a, 0x01, 0xb5, 0x9a, 0xb9, 0xd9, 0xf5, 0x18, + 0x1c, 0xee, 0x80, 0x47, 0x04, 0xd2, 0x7e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0x5a, 0xcd, 0x5c, 0x7a, + 0xb3, 0x0d, 0x83, 0xa3, 0x98, 0x7d, 0xb2, 0x5a, 0x7b, 0xd2, 0x59, 0xad, 0x7f, 0xdd, 0xed, 0x18, + 0x3f, 0x37, 0xff, 0x15, 0x8e, 0xd9, 0x86, 0x19, 0x95, 0x36, 0x8f, 0xe3, 0x99, 0x63, 0xea, 0x58, + 0x33, 0xc5, 0x08, 0x16, 0x8e, 0x21, 0xa3, 0xdd, 0xde, 0x85, 0x60, 0x34, 0x07, 0x9d, 0xd8, 0x4f, + 0x11, 0xd0, 0x7f, 0x4e, 0xc2, 0x89, 0x8b, 0xae, 0x67, 0xdf, 0x11, 0x59, 0x5e, 0xb9, 0xea, 0x96, + 0x56, 0x55, 0xfb, 0xa7, 0x1e, 0xfa, 0x10, 0xa6, 0x04, 0x7b, 0x25, 0xc2, 0x89, 0xf4, 0x51, 0x3a, + 0x7f, 0x6e, 0x38, 0xae, 0xfd, 0xc2, 0xb0, 0x4e, 0x39, 0x69, 0x7b, 0xb5, 0xfd, 0x0e, 0x87, 0xa8, + 0xe8, 0x26, 0x68, 0xac, 0x46, 0x2d, 0xe5, 0xc9, 0x57, 0x8d, 0xbd, 0xaf, 0x21, 0x46, 0x9f, 0x8d, + 0x6e, 0xd4, 0xa8, 0xd5, 0x2e, 0x26, 0xe2, 0x09, 0x4b, 0x58, 0x44, 0x61, 0x82, 0xc9, 0x80, 0x53, + 0xbe, 0x7b, 0x6d, 0x54, 0x03, 0x12, 0xa4, 0x30, 0xab, 0x4c, 0x4c, 0xf8, 0xcf, 0x58, 0x81, 0xeb, + 0x9f, 0xa7, 0x60, 0xa9, 0x8f, 0x66, 0xd1, 0x75, 0x4a, 0xb6, 0x2c, 0xf6, 0x17, 0x41, 0xe3, 0xbb, + 0xb5, 0x20, 0xd8, 0x57, 0x82, 0xdd, 0x6e, 0xee, 0xd6, 0x44, 0x3b, 0x7a, 0x66, 0x90, 0xbe, 0x90, + 0xc3, 0x12, 0x01, 0xad, 0x85, 0xa7, 0x4a, 0xc6, 0xb0, 0xd4, 0xb6, 0x1e, 0x35, 0x73, 0x3d, 0xee, + 0x5f, 0x46, 0x88, 0x14, 0xdf, 0xbc, 0xa8, 0x0d, 0x15, 0xc2, 0xf8, 0xa6, 0x47, 0x1c, 0xe6, 0x5b, + 0xb2, 0xab, 0x41, 0xac, 0x9f, 0x19, 0xce, 0xdd, 0x42, 0xa3, 0xb0, 0xa8, 0x76, 0x81, 0xd6, 0xba, + 0xd0, 0x70, 0x0f, 0x0b, 0xe8, 0x59, 0x98, 0xf0, 0x28, 0x61, 0xae, 0xa3, 0x5a, 0x4f, 0x48, 0x2e, + 0x96, 0x6f, 0xb1, 0x5a, 0x45, 0xcf, 0xc1, 0x64, 0x95, 0x32, 0x46, 0xca, 0x34, 0x33, 0x2e, 0x05, + 0xe7, 0x94, 0xe0, 0xe4, 0xba, 0xff, 0x1a, 0x07, 0xeb, 0xfa, 0x83, 0x04, 0x9c, 
0xea, 0xc3, 0xe3, + 0x9a, 0xcd, 0x38, 0xba, 0xd1, 0x15, 0xcf, 0xc6, 0x90, 0xb5, 0xc3, 0x66, 0x7e, 0x34, 0xcf, 0x2b, + 0xdb, 0x53, 0xc1, 0x9b, 0x48, 0x2c, 0xdf, 0x80, 0x71, 0x9b, 0xd3, 0xaa, 0xf0, 0x4a, 0x6a, 0x39, + 0x9d, 0x7f, 0x79, 0xc4, 0x58, 0x2b, 0x1c, 0x51, 0x36, 0xc6, 0x2f, 0x09, 0x34, 0xec, 0x83, 0xea, + 0xbf, 0x24, 0xfb, 0x9e, 0x4d, 0x04, 0x3c, 0xfa, 0x18, 0x66, 0xe5, 0x93, 0x5f, 0x99, 0x31, 0xbd, + 0xa5, 0x4e, 0x38, 0x30, 0xa7, 0xf6, 0x68, 0xe8, 0x85, 0xe3, 0x6a, 0x2b, 0xb3, 0x1b, 0x31, 0x68, + 0xdc, 0x61, 0x0a, 0x9d, 0x87, 0x74, 0xd5, 0x76, 0x30, 0xad, 0x55, 0x6c, 0x8b, 0x30, 0x75, 0x2f, + 0x92, 0x2d, 0x69, 0xbd, 0xfd, 0x1a, 0x47, 0x65, 0xd0, 0x8b, 0x90, 0xae, 0x92, 0xdb, 0xa1, 0x4a, + 0x4a, 0xaa, 0x1c, 0x55, 0xf6, 0xd2, 0xeb, 0xed, 0x25, 0x1c, 0x95, 0x43, 0xd7, 0x44, 0x34, 0x88, + 0x2a, 0xcd, 0x32, 0x9a, 0xa4, 0xf9, 0xcc, 0xa0, 0xf3, 0xa9, 0x22, 0x2f, 0x4a, 0x44, 0x24, 0x72, + 0x24, 0x04, 0x0e, 0xb0, 0xf4, 0x1f, 0x35, 0x38, 0xbd, 0x67, 0xee, 0xa3, 0xb7, 0x00, 0xb9, 0x5b, + 0x8c, 0x7a, 0x0d, 0x5a, 0xba, 0xe0, 0x5f, 0xfa, 0xc5, 0xfd, 0x44, 0x70, 0x9c, 0xf2, 0x5b, 0xe2, + 0x95, 0xae, 0x55, 0xdc, 0x43, 0x03, 0x59, 0x70, 0x44, 0x24, 0x83, 0x4f, 0xa8, 0xad, 0xae, 0x42, + 0xfb, 0xcb, 0xb4, 0x85, 0x56, 0x33, 0x77, 0x64, 0x2d, 0x0a, 0x82, 0xe3, 0x98, 0x68, 0x15, 0xe6, + 0x54, 0xad, 0xef, 0x20, 0xf8, 0x84, 0x62, 0x60, 0xae, 0x18, 0x5f, 0xc6, 0x9d, 0xf2, 0x02, 0xa2, + 0x44, 0x99, 0xed, 0xd1, 0x52, 0x08, 0xa1, 0xc5, 0x21, 0xde, 0x88, 0x2f, 0xe3, 0x4e, 0x79, 0x54, + 0x81, 0x59, 0x85, 0xaa, 0xf8, 0xce, 0x8c, 0x4b, 0x97, 0x3d, 0x3f, 0xa4, 0xcb, 0xfc, 0xa2, 0x1b, + 0xc6, 0x60, 0x31, 0x86, 0x85, 0x3b, 0xb0, 0x11, 0x07, 0xb0, 0x82, 0x12, 0xc7, 0x32, 0x13, 0xd2, + 0xd2, 0xeb, 0x23, 0xe6, 0x60, 0x58, 0x2b, 0xdb, 0xed, 0x2b, 0x7c, 0xc5, 0x70, 0xc4, 0x8e, 0xfe, + 0xbd, 0x06, 0xd0, 0x8e, 0x30, 0xb4, 0x12, 0x2b, 0xf2, 0x4b, 0x1d, 0x45, 0x7e, 0x3e, 0x7a, 0x39, + 0x8d, 0x14, 0xf4, 0xeb, 0x30, 0xe1, 0xca, 0xcc, 0x53, 0xc1, 0x90, 0x1f, 0xb4, 0xed, 0xb0, 0x97, + 0x86, 0x68, 0x05, 0x10, 0xa5, 0x53, 0xe5, 0xaf, 0x42, 0x43, 0x97, 0x41, 0xab, 0xb9, 0xa5, 0xa0, + 0xf9, 0x9d, 0x1b, 0x84, 0x7a, 0xd5, 0x2d, 0xb1, 0x18, 0xe6, 0x94, 0xd8, 0xbb, 0x78, 0x8b, 0x25, + 0x0e, 0xfa, 0x00, 0xa6, 0x82, 0xeb, 0x86, 0xba, 0x9b, 0xac, 0x0c, 0xc2, 0xec, 0x35, 0x03, 0x17, + 0x66, 0x44, 0x05, 0x0d, 0x56, 0x70, 0x88, 0x89, 0x3e, 0x4b, 0xc0, 0x82, 0xd5, 0x39, 0xd3, 0x65, + 0x26, 0x87, 0x6b, 0xdd, 0x7b, 0x8e, 0xdd, 0x85, 0xff, 0xb5, 0x9a, 0xb9, 0x85, 0x2e, 0x11, 0xdc, + 0x6d, 0x4e, 0x1c, 0x92, 0xaa, 0x2b, 0xab, 0x6c, 0x38, 0x43, 0x1c, 0xb2, 0xd7, 0xec, 0xe1, 0x1f, + 0x32, 0x58, 0xc1, 0x21, 0xa6, 0xfe, 0x83, 0x06, 0x33, 0xb1, 0xbb, 0xf0, 0xdf, 0x11, 0x33, 0x7e, + 0x6a, 0x1d, 0x6c, 0xcc, 0xf8, 0x98, 0x07, 0x1f, 0x33, 0x3e, 0xee, 0xa1, 0xc6, 0x8c, 0x6f, 0xf2, + 0x30, 0x63, 0x26, 0x72, 0xc8, 0x1e, 0x31, 0xf3, 0x20, 0x05, 0xa8, 0x3b, 0xe7, 0x91, 0x05, 0x13, + 0xfe, 0xd0, 0x75, 0x10, 0xbd, 0x3e, 0xbc, 0x7f, 0xa9, 0xb6, 0xae, 0xa0, 0x3b, 0x46, 0xb5, 0xe4, + 0x50, 0xa3, 0x1a, 0x3d, 0x88, 0x91, 0x36, 0xbc, 0x0c, 0xf4, 0x1d, 0x6b, 0x6f, 0xc2, 0x14, 0x0b, + 0x66, 0x41, 0x6d, 0xf4, 0x59, 0x50, 0xb2, 0x1e, 0x4e, 0x81, 0x21, 0x24, 0x2a, 0xc1, 0x0c, 0x89, + 0x8e, 0x63, 0xe3, 0x23, 0x1d, 0x63, 0x5e, 0xcc, 0x7e, 0xb1, 0x39, 0x2c, 0x86, 0xaa, 0xff, 0xda, + 0xe9, 0x5b, 0xbf, 0x2a, 0xfc, 0x63, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0x7f, 0xc2, 0xbd, 0xdf, 0x24, + 0x61, 0xbe, 0xb3, 0xb1, 0x8e, 0xf4, 0xf9, 0xe3, 0x4e, 0xcf, 0x6f, 0x38, 0xc9, 0x91, 0x36, 0x1d, + 0xce, 0x6a, 0x43, 0x7e, 0x9d, 0x8d, 0x7a, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x8d, 0x73, 0x34, + 0xfa, 
0x27, 0xa2, 0x3e, 0x1f, 0x54, 0x93, 0x87, 0xf4, 0x41, 0xf5, 0x09, 0xd3, 0xf4, 0x5d, 0x12, + 0x8e, 0x3d, 0xfd, 0x4f, 0x61, 0xf8, 0xaf, 0x8f, 0x3f, 0x75, 0xf3, 0xf5, 0xf4, 0x9f, 0x81, 0x61, + 0x02, 0xb9, 0x70, 0xf6, 0xee, 0xc3, 0xec, 0xd8, 0xbd, 0x87, 0xd9, 0xb1, 0xfb, 0x0f, 0xb3, 0x63, + 0x9f, 0xb6, 0xb2, 0x89, 0xbb, 0xad, 0x6c, 0xe2, 0x5e, 0x2b, 0x9b, 0xb8, 0xdf, 0xca, 0x26, 0x7e, + 0x6b, 0x65, 0x13, 0x5f, 0xfc, 0x9e, 0x1d, 0x7b, 0x6f, 0x52, 0xb5, 0x9e, 0xbf, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x4d, 0x4c, 0xa8, 0x42, 0x87, 0x1c, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1081,6 +1242,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1157,6 +1330,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1556,6 +1741,44 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -1741,6 +1964,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1768,6 +1995,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1891,6 +2122,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2024,6 +2281,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), 
"ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2038,6 +2296,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2110,21 +2369,358 @@ func (this *ResourceMetricStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var 
v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2257,10 +2853,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2450,10 +3043,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2640,10 +3230,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2792,10 +3379,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3006,10 +3590,7 @@ func 
(m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3126,10 +3707,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3285,10 +3863,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3500,10 +4075,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3723,16 +4295,49 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3952,16 +4557,49 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4181,10 +4819,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4404,10 +5039,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4558,10 +5190,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4712,10 +5341,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4853,10 +5479,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4991,10 +5614,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index f90f93f9f4..5ad55fa727 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v2beta1; @@ -30,6 +30,60 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2beta1"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. 
+ // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + + // container is the name of the container in the pods of the scaling target + optional string container = 4; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + + // container is the name of the container in the pods of the scaling target + optional string container = 4; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -200,8 +254,10 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -223,6 +279,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. 
+ // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -234,8 +299,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -257,6 +324,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index d76e879678..05023d9bcf 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -76,6 +76,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -87,8 +93,10 @@ const ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. 
+ // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -107,6 +115,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -178,6 +194,30 @@ type ResourceMetricSource struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -265,8 +305,10 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. 
It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -285,6 +327,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -354,6 +403,30 @@ type ResourceMetricStatus struct { CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. 
type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index a0d5f53375..08a54621f8 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -27,6 +27,30 @@ package v2beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -123,12 +147,13 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -136,12 +161,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index c51e05b8f0..f10c7884d0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -25,6 +25,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -263,6 +311,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -299,6 +352,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index c69d6cb9e7..cece3c877a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) 
Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{1} + return fileDescriptor_592ad94d7d6be24f, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{2} + return fileDescriptor_592ad94d7d6be24f, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} + return fileDescriptor_592ad94d7d6be24f, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var 
xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} + return fileDescriptor_592ad94d7d6be24f, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} + return fileDescriptor_592ad94d7d6be24f, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{10} + return fileDescriptor_592ad94d7d6be24f, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} + return fileDescriptor_592ad94d7d6be24f, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} + return fileDescriptor_592ad94d7d6be24f, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} + return fileDescriptor_592ad94d7d6be24f, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} + return fileDescriptor_592ad94d7d6be24f, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} + return fileDescriptor_592ad94d7d6be24f, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -498,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} + return fileDescriptor_592ad94d7d6be24f, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} + return fileDescriptor_592ad94d7d6be24f, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} + return fileDescriptor_592ad94d7d6be24f, []int{20} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} + return fileDescriptor_592ad94d7d6be24f, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} + return fileDescriptor_592ad94d7d6be24f, []int{22} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} + return fileDescriptor_592ad94d7d6be24f, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,6 +720,8 @@ func (m *ResourceMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") @@ -693,111 +751,202 @@ func init() { } var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1657 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45, - 0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55, - 0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6, - 0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22, - 0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa, - 0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb, - 0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70, - 0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93, - 0xd8, 0x96, 0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed, - 0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea, - 0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17, - 0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72, - 0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f, - 0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5, - 0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1, - 0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60, - 0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4, - 0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38, - 0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37, - 0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22, - 0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a, - 0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd, - 0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81, - 0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59, - 0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a, - 0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6, - 0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45, - 0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b, - 0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30, - 0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76, - 0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7, - 0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54, - 0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31, - 0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 0xf2, - 0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3, - 0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6, - 0xdd, 0xca, 0xcd, 0x96, 0xf6, 
0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31, - 0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d, - 0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24, - 0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02, - 0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1, - 0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06, - 0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59, - 0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55, - 0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04, - 0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17, - 0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8, - 0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22, - 0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65, - 0xe7, 0xae, 0xad, 0xcc, 0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07, - 0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a, - 0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11, - 0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0, - 0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40, - 0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23, - 0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f, - 0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46, - 0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54, - 0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86, - 0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92, - 0xa0, 0xfa, 0x83, 0xe4, 0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b, - 0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11, - 0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa, - 0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c, - 0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2, - 0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45, - 0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d, - 0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0, - 0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c, - 0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02, - 0x9a, 0x60, 0x92, 0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d, - 0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c, - 0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 
0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54, - 0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58, - 0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a, - 0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b, - 0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4, - 0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58, - 0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12, - 0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4, - 0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 0x9f, 0x31, 0x83, 0x31, 0x29, 0x40, - 0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b, - 0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e, - 0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2, - 0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd, - 0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86, - 0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42, - 0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc, - 0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8, - 0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b, - 0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a, - 0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd, - 0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51, - 0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5, - 0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3, - 0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f, - 0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc, - 0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84, - 0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e, - 0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1, - 0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43, - 0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf, - 0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00, + // 1741 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x92, 0xd4, 0xd7, 0x50, 0x9f, 0xe3, 0x2f, 0x42, 0x86, 0x49, 0x61, 0x6b, 0xb4, 0xae, + 0x51, 0x2f, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0x5b, 0xdb, 0xb0, 0x64, 0xab, 0x43, + 0x59, 0x2d, 0x0a, 0xd9, 0xe8, 0x70, 0x77, 0x44, 0x4d, 0x45, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, + 0x81, 0xa2, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 
0x09, 0x90, 0x63, 0x3e, + 0x60, 0x18, 0x41, 0x10, 0xf8, 0x16, 0xe7, 0x42, 0xc4, 0xcc, 0x31, 0xc7, 0xdc, 0x7c, 0x0a, 0xe6, + 0x63, 0x3f, 0x49, 0x89, 0x94, 0x20, 0x29, 0xd0, 0x8d, 0x3b, 0xf3, 0xde, 0xef, 0xcd, 0x7b, 0xf3, + 0x7b, 0x6f, 0xde, 0x0c, 0xc1, 0xad, 0x9d, 0xeb, 0xae, 0x46, 0xed, 0xe2, 0x4e, 0xb3, 0x42, 0x1c, + 0x8b, 0x78, 0xc4, 0x2d, 0xb6, 0x88, 0x65, 0xda, 0x4e, 0x51, 0x4e, 0xe0, 0x06, 0x2d, 0xe2, 0xa6, + 0x67, 0xbb, 0x06, 0xae, 0x51, 0xab, 0x5a, 0x6c, 0x95, 0x2a, 0xc4, 0xc3, 0xa5, 0x62, 0x95, 0x58, + 0xc4, 0xc1, 0x1e, 0x31, 0xb5, 0x86, 0x63, 0x7b, 0x36, 0xcc, 0x0b, 0x79, 0x0d, 0x37, 0xa8, 0x16, + 0x91, 0xd7, 0xa4, 0xfc, 0xdc, 0xb5, 0x2a, 0xf5, 0xb6, 0x9b, 0x15, 0xcd, 0xb0, 0xeb, 0xc5, 0xaa, + 0x5d, 0xb5, 0x8b, 0x5c, 0xad, 0xd2, 0xdc, 0xe2, 0x5f, 0xfc, 0x83, 0xff, 0x12, 0x70, 0x73, 0x6a, + 0xc4, 0xbc, 0x61, 0x3b, 0xa4, 0xd8, 0x5a, 0x48, 0x9a, 0x9c, 0x5b, 0x0c, 0x65, 0xea, 0xd8, 0xd8, + 0xa6, 0x16, 0x71, 0x76, 0x8b, 0x8d, 0x9d, 0x2a, 0x57, 0x72, 0x88, 0x6b, 0x37, 0x1d, 0x83, 0x1c, + 0x48, 0xcb, 0x2d, 0xd6, 0x89, 0x87, 0x7b, 0xd9, 0x2a, 0xee, 0xa5, 0xe5, 0x34, 0x2d, 0x8f, 0xd6, + 0xbb, 0xcd, 0xfc, 0xa1, 0x9f, 0x82, 0x6b, 0x6c, 0x93, 0x3a, 0x4e, 0xea, 0xa9, 0x3f, 0x28, 0xe0, + 0xd2, 0xb2, 0x6d, 0x79, 0x98, 0x69, 0x20, 0xe9, 0xc4, 0x2a, 0xf1, 0x1c, 0x6a, 0x94, 0xf9, 0x6f, + 0xb8, 0x0c, 0x32, 0x16, 0xae, 0x93, 0x9c, 0x32, 0xaf, 0x5c, 0x19, 0xd7, 0x8b, 0x2f, 0xda, 0x85, + 0xa1, 0x4e, 0xbb, 0x90, 0xb9, 0x87, 0xeb, 0xe4, 0x4d, 0xbb, 0x50, 0xe8, 0x0e, 0x9c, 0xe6, 0xc3, + 0x30, 0x11, 0xc4, 0x95, 0xe1, 0x3a, 0x18, 0xf1, 0xb0, 0x53, 0x25, 0x5e, 0x2e, 0x35, 0xaf, 0x5c, + 0xc9, 0x96, 0x7e, 0xa3, 0xed, 0xbf, 0x7f, 0x9a, 0x58, 0xc2, 0x3a, 0xd7, 0xd1, 0xa7, 0xa4, 0xd1, + 0x11, 0xf1, 0x8d, 0x24, 0x16, 0x2c, 0x82, 0x71, 0xc3, 0x5f, 0x7b, 0x2e, 0xcd, 0xd7, 0x37, 0x2b, + 0x45, 0xc7, 0x43, 0xa7, 0x42, 0x19, 0xf5, 0xc7, 0x7d, 0xbc, 0xf5, 0xb0, 0xd7, 0x74, 0x8f, 0xc6, + 0xdb, 0x4d, 0x30, 0x6a, 0x34, 0x1d, 0x87, 0x58, 0xbe, 0xbb, 0x0b, 0x83, 0xb9, 0xbb, 0x81, 0x6b, + 0x4d, 0x22, 0x16, 0xa2, 0x4f, 0x4b, 0xd3, 0xa3, 0xcb, 0x02, 0x09, 0xf9, 0x90, 0x07, 0xf7, 0xfa, + 0x7d, 0x05, 0x5c, 0x5c, 0x76, 0x6c, 0xd7, 0xdd, 0x20, 0x8e, 0x4b, 0x6d, 0xeb, 0x7e, 0xe5, 0x3f, + 0xc4, 0xf0, 0x10, 0xd9, 0x22, 0x0e, 0xb1, 0x0c, 0x02, 0xe7, 0x41, 0x66, 0x87, 0x5a, 0xa6, 0xf4, + 0x79, 0xc2, 0xf7, 0xf9, 0x2e, 0xb5, 0x4c, 0xc4, 0x67, 0x98, 0x04, 0x8f, 0x4a, 0x2a, 0x2e, 0x11, + 0x71, 0xb9, 0x04, 0x00, 0x6e, 0x50, 0x69, 0x40, 0xae, 0x0a, 0x4a, 0x39, 0xb0, 0xb4, 0x76, 0x47, + 0xce, 0xa0, 0x88, 0x94, 0xfa, 0x5c, 0x01, 0x67, 0xff, 0xfa, 0xc4, 0x23, 0x8e, 0x85, 0x6b, 0x31, + 0xca, 0xfd, 0x13, 0x8c, 0xd4, 0xf9, 0x37, 0x5f, 0x52, 0xb6, 0xf4, 0xdb, 0xc1, 0xc2, 0x77, 0xc7, + 0x24, 0x96, 0x47, 0xb7, 0x28, 0x71, 0x42, 0xc6, 0x88, 0x19, 0x24, 0xf1, 0x8e, 0x87, 0x87, 0xea, + 0xd7, 0xdd, 0x8e, 0x08, 0x36, 0x1d, 0x9f, 0x23, 0xc7, 0x4a, 0x31, 0xf5, 0x43, 0x05, 0xcc, 0xdc, + 0x5e, 0x5b, 0x2a, 0x0b, 0x88, 0x35, 0xbb, 0x46, 0x8d, 0x5d, 0x78, 0x1d, 0x64, 0xbc, 0xdd, 0x86, + 0x9f, 0x1a, 0x97, 0x7d, 0x12, 0xac, 0xef, 0x36, 0x58, 0x6a, 0x9c, 0x4d, 0xca, 0xb3, 0x71, 0xc4, + 0x35, 0xe0, 0x2f, 0xc0, 0x70, 0x8b, 0xd9, 0xe5, 0x4b, 0x1d, 0xd6, 0x27, 0xa5, 0xea, 0x30, 0x5f, + 0x0c, 0x12, 0x73, 0xf0, 0x06, 0x98, 0x6c, 0x10, 0x87, 0xda, 0x66, 0x99, 0x18, 0xb6, 0x65, 0xba, + 0x9c, 0x44, 0xc3, 0xfa, 0x39, 0x29, 0x3c, 0xb9, 0x16, 0x9d, 0x44, 0x71, 0x59, 0xf5, 0x83, 0x14, + 0x98, 0x0e, 0x17, 0x80, 0x9a, 0x35, 0xe2, 0xc2, 0x47, 0x60, 0xce, 0xf5, 0x70, 0x85, 0xd6, 0xe8, + 0x53, 0xec, 0x51, 0xdb, 0xfa, 0x07, 0xb5, 0x4c, 0xfb, 0x71, 0x1c, 0x3d, 0xdf, 0x69, 0x17, 0xe6, + 
0xca, 0x7b, 0x4a, 0xa1, 0x7d, 0x10, 0xe0, 0x5d, 0x30, 0xe1, 0x92, 0x1a, 0x31, 0x3c, 0xe1, 0xaf, + 0x8c, 0xcb, 0xaf, 0x3a, 0xed, 0xc2, 0x44, 0x39, 0x32, 0xfe, 0xa6, 0x5d, 0x38, 0x13, 0x0b, 0x8c, + 0x98, 0x44, 0x31, 0x65, 0xf8, 0x08, 0x8c, 0x35, 0xd8, 0x2f, 0x4a, 0xdc, 0x5c, 0x6a, 0x3e, 0x3d, + 0x08, 0x57, 0x92, 0x01, 0xd7, 0x67, 0x64, 0xa8, 0xc6, 0xd6, 0x24, 0x12, 0x0a, 0x30, 0xd5, 0x4f, + 0x53, 0xe0, 0xc2, 0x6d, 0xdb, 0xa1, 0x4f, 0x59, 0x55, 0xa8, 0xad, 0xd9, 0xe6, 0x92, 0x44, 0x24, + 0x0e, 0xfc, 0x37, 0x18, 0x63, 0xe7, 0x90, 0x89, 0x3d, 0xdc, 0x83, 0xa7, 0xc1, 0x71, 0xa2, 0x35, + 0x76, 0xaa, 0x6c, 0xc0, 0xd5, 0x98, 0xb4, 0xd6, 0x5a, 0xd0, 0x44, 0x21, 0x59, 0x25, 0x1e, 0x0e, + 0x73, 0x3d, 0x1c, 0x43, 0x01, 0x2a, 0x7c, 0x08, 0x32, 0x6e, 0x83, 0x18, 0x92, 0xaa, 0x37, 0xfa, + 0x7a, 0xd6, 0x7b, 0xa1, 0xe5, 0x06, 0x31, 0xc2, 0xe2, 0xc3, 0xbe, 0x10, 0x87, 0x85, 0x04, 0x8c, + 0xb8, 0x9c, 0xd2, 0x7c, 0x57, 0xb3, 0xa5, 0x3f, 0x1d, 0xd6, 0x80, 0xc8, 0x8b, 0x20, 0xe7, 0xc4, + 0x37, 0x92, 0xe0, 0xea, 0x37, 0x0a, 0x28, 0xec, 0xa1, 0xa9, 0x93, 0x6d, 0xdc, 0xa2, 0xb6, 0x03, + 0x37, 0xc0, 0x28, 0x1f, 0x79, 0xd0, 0x90, 0xa1, 0x2c, 0x0e, 0xbe, 0x8d, 0x9c, 0xb6, 0x7a, 0x96, + 0x65, 0x64, 0x59, 0x60, 0x20, 0x1f, 0x0c, 0x6e, 0x82, 0x71, 0xfe, 0xf3, 0xa6, 0xfd, 0xd8, 0x92, + 0x61, 0x3c, 0x30, 0xf2, 0x24, 0x3b, 0x21, 0xca, 0x3e, 0x0a, 0x0a, 0x01, 0xd5, 0xb7, 0xd3, 0x60, + 0x7e, 0x0f, 0xcf, 0x96, 0x6d, 0xcb, 0xa4, 0x8c, 0xfc, 0xf0, 0x76, 0x2c, 0xff, 0x17, 0x13, 0xf9, + 0x7f, 0xb9, 0x9f, 0x7e, 0xa4, 0x1e, 0xac, 0x04, 0xfb, 0x95, 0x8a, 0x61, 0xc9, 0x80, 0xbf, 0x69, + 0x17, 0x7a, 0xf4, 0x63, 0x5a, 0x80, 0x14, 0xdf, 0x16, 0xd8, 0x02, 0xb0, 0x86, 0x5d, 0x6f, 0xdd, + 0xc1, 0x96, 0x2b, 0x2c, 0xd1, 0x3a, 0x91, 0x4c, 0xb8, 0x3a, 0x18, 0x91, 0x99, 0x86, 0x3e, 0x27, + 0x57, 0x01, 0x57, 0xba, 0xd0, 0x50, 0x0f, 0x0b, 0xf0, 0x97, 0x60, 0xc4, 0x21, 0xd8, 0xb5, 0xad, + 0x5c, 0x86, 0x7b, 0x11, 0xd0, 0x06, 0xf1, 0x51, 0x24, 0x67, 0xe1, 0xaf, 0xc1, 0x68, 0x9d, 0xb8, + 0x2e, 0xae, 0x92, 0xdc, 0x30, 0x17, 0x0c, 0xea, 0xee, 0xaa, 0x18, 0x46, 0xfe, 0xbc, 0xfa, 0xad, + 0x02, 0x2e, 0xee, 0x11, 0xc7, 0x15, 0xea, 0x7a, 0x70, 0xb3, 0x2b, 0x53, 0xb5, 0xc1, 0x1c, 0x64, + 0xda, 0x3c, 0x4f, 0x83, 0x1a, 0xe1, 0x8f, 0x44, 0xb2, 0x74, 0x13, 0x0c, 0x53, 0x8f, 0xd4, 0xfd, + 0x02, 0xf4, 0xc7, 0x43, 0x66, 0x51, 0x58, 0xdf, 0xef, 0x30, 0x34, 0x24, 0x40, 0xd5, 0xe7, 0xe9, + 0x3d, 0x7d, 0x63, 0xa9, 0x0c, 0xff, 0x0b, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0, + 0x6f, 0xb5, 0xd8, 0xa7, 0xb5, 0xd1, 0xcf, 0xcb, 0xa5, 0x4c, 0x95, 0x63, 0xd0, 0x28, 0x61, 0x0a, + 0x2e, 0x80, 0x6c, 0x9d, 0x5a, 0x88, 0x34, 0x6a, 0xd4, 0xc0, 0xae, 0x3c, 0xa7, 0xa6, 0x3b, 0xed, + 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x7b, 0x90, 0xad, 0xe3, 0x27, 0x81, 0x8a, 0x38, + 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x03, 0xc6, 0x06, 0x76, 0x12, + 0xbb, 0xb9, 0x0c, 0x0f, 0xf3, 0xd5, 0xc1, 0x0e, 0x6e, 0x5e, 0xfc, 0x22, 0xcc, 0xe1, 0x10, 0xc8, + 0xc7, 0x82, 0x14, 0x8c, 0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x7c, 0xd8, 0xed, 0x93, + 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xe3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01, + 0x85, 0x7f, 0x03, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x96, 0xb8, 0x6f, 0xb0, 0xa6, 0x90, + 0x6d, 0x67, 0x5a, 0x3f, 0xcf, 0x32, 0xec, 0x7e, 0xd7, 0x2c, 0xea, 0xa1, 0x01, 0x0d, 0x30, 0xc9, + 0xf2, 0x4e, 0xec, 0x1d, 0x95, 0xfd, 0xe7, 0xc1, 0x92, 0x7a, 0x96, 0xb5, 0x0e, 0x2b, 0x51, 0x10, + 0x14, 0xc7, 0x84, 0x4b, 0x60, 0x5a, 0xb6, 0x3d, 0x89, 0xbd, 0xbc, 0x20, 0x83, 0x3d, 0xbd, 0x1c, + 0x9f, 0x46, 0x49, 0x79, 
0x06, 0x61, 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0x9b, + 0xf1, 0x69, 0x94, 0x94, 0x87, 0x35, 0x30, 0x25, 0x51, 0xe5, 0xd6, 0xe6, 0x86, 0x39, 0x3b, 0x06, + 0x6c, 0x50, 0xe5, 0xc9, 0x15, 0xd0, 0x7d, 0x39, 0x86, 0x85, 0x12, 0xd8, 0xd0, 0x03, 0xc0, 0xf0, + 0xab, 0xa9, 0x9b, 0x1b, 0xe1, 0x96, 0xfe, 0x72, 0x48, 0xbe, 0x04, 0x65, 0x39, 0xec, 0x01, 0x82, + 0x21, 0x17, 0x45, 0xec, 0xa8, 0xef, 0x29, 0x60, 0x26, 0xd9, 0xe0, 0x06, 0x57, 0x0b, 0x65, 0xcf, + 0xab, 0xc5, 0x43, 0x30, 0x26, 0x5a, 0x25, 0xdb, 0x91, 0x04, 0xf8, 0xdd, 0x80, 0x45, 0x0f, 0x57, + 0x48, 0xad, 0x2c, 0x55, 0x05, 0x9d, 0xfd, 0x2f, 0x14, 0x40, 0xaa, 0x1f, 0x65, 0x00, 0x08, 0x53, + 0x0c, 0x2e, 0xc6, 0x4e, 0xb9, 0xf9, 0xc4, 0x29, 0x37, 0x13, 0xbd, 0xa7, 0x44, 0x4e, 0xb4, 0x0d, + 0x30, 0x62, 0xf3, 0xd2, 0x23, 0x57, 0x58, 0xea, 0x17, 0xcc, 0xa0, 0x4d, 0x0a, 0xd0, 0x74, 0xc0, + 0xce, 0x0e, 0x59, 0xc0, 0x24, 0x1a, 0xbc, 0x07, 0x32, 0x0d, 0xdb, 0xf4, 0xfb, 0x9a, 0xbe, 0x2d, + 0xe1, 0x9a, 0x6d, 0xba, 0x31, 0xcc, 0x31, 0xb6, 0x76, 0x36, 0x8a, 0x38, 0x0e, 0x6b, 0x33, 0xfd, + 0x97, 0x0a, 0x4e, 0xd1, 0x6c, 0x69, 0xb1, 0x1f, 0x66, 0xaf, 0x47, 0x01, 0x11, 0x4c, 0x7f, 0x06, + 0x05, 0x98, 0xf0, 0x2d, 0x05, 0xcc, 0x1a, 0xc9, 0x0b, 0x76, 0x6e, 0x74, 0xb0, 0xae, 0x6c, 0xdf, + 0x77, 0x08, 0xfd, 0x5c, 0xa7, 0x5d, 0x98, 0xed, 0x12, 0x41, 0xdd, 0xe6, 0x98, 0x93, 0x44, 0xde, + 0xc6, 0x64, 0x2d, 0xec, 0xeb, 0x64, 0xaf, 0x6b, 0xa8, 0x70, 0xd2, 0x9f, 0x41, 0x01, 0xa6, 0xfa, + 0x2c, 0x03, 0x26, 0x62, 0xd7, 0xbc, 0x9f, 0x83, 0x33, 0x22, 0xe1, 0x8f, 0x96, 0x33, 0x02, 0xf3, + 0xe8, 0x39, 0x23, 0x70, 0x4f, 0x94, 0x33, 0xc2, 0xe4, 0x49, 0x72, 0x26, 0xe2, 0x64, 0x0f, 0xce, + 0x7c, 0x9e, 0xf2, 0x39, 0x23, 0x9a, 0x8e, 0xc1, 0x38, 0x23, 0x64, 0x23, 0x9c, 0xb9, 0x1f, 0xbd, + 0x49, 0xf7, 0xe9, 0xfe, 0x34, 0x3f, 0xc2, 0xda, 0xdf, 0x9b, 0xd8, 0xf2, 0xa8, 0xb7, 0xab, 0x8f, + 0x77, 0xdd, 0xba, 0x4d, 0x30, 0x81, 0x5b, 0xc4, 0xc1, 0x55, 0xc2, 0x87, 0x25, 0x69, 0x0e, 0x8a, + 0x3b, 0xc3, 0x2e, 0xbd, 0x4b, 0x11, 0x1c, 0x14, 0x43, 0x65, 0x0d, 0x81, 0xfc, 0x7e, 0xe0, 0x05, + 0xb7, 0x69, 0x79, 0x46, 0xf2, 0x86, 0x60, 0xa9, 0x6b, 0x16, 0xf5, 0xd0, 0x50, 0xdf, 0x4d, 0x81, + 0xd9, 0xae, 0x77, 0x8c, 0x30, 0x28, 0xca, 0x31, 0x05, 0x25, 0x75, 0x82, 0x41, 0x49, 0x1f, 0x38, + 0x28, 0x5f, 0xa4, 0x00, 0xec, 0x3e, 0x4e, 0xe0, 0xff, 0x78, 0x53, 0x62, 0x38, 0xb4, 0x42, 0x4c, + 0x31, 0x7d, 0x14, 0x0d, 0x75, 0xb4, 0xa3, 0x89, 0x62, 0xa3, 0xa4, 0xb1, 0x63, 0x7a, 0xf2, 0x0d, + 0x5f, 0xd4, 0xd2, 0x47, 0xfb, 0xa2, 0xa6, 0x7e, 0x95, 0x0c, 0xe3, 0xa9, 0x7e, 0xc2, 0xeb, 0xb5, + 0xfd, 0xe9, 0x13, 0xdc, 0x7e, 0xf5, 0x33, 0x05, 0xcc, 0x24, 0xdb, 0x91, 0x53, 0xf7, 0xb0, 0xfb, + 0x65, 0xdc, 0x89, 0xd3, 0xfd, 0xa8, 0xfb, 0x4c, 0x01, 0x67, 0x4f, 0xd9, 0x3f, 0x3c, 0xea, 0x27, + 0xdd, 0x6b, 0x3e, 0x2d, 0xff, 0xd3, 0xe8, 0xd7, 0x5e, 0xbc, 0xce, 0x0f, 0xbd, 0x7c, 0x9d, 0x1f, + 0x7a, 0xf5, 0x3a, 0x3f, 0xf4, 0xff, 0x4e, 0x5e, 0x79, 0xd1, 0xc9, 0x2b, 0x2f, 0x3b, 0x79, 0xe5, + 0x55, 0x27, 0xaf, 0x7c, 0xd7, 0xc9, 0x2b, 0xef, 0x7c, 0x9f, 0x1f, 0xfa, 0xd7, 0xa8, 0x84, 0xfe, + 0x29, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x79, 0x7e, 0xc9, 0x1b, 0x1d, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1408,6 +1557,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1484,6 +1645,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1928,6 +2101,36 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -2166,6 +2369,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return 
n } @@ -2193,6 +2400,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2326,6 +2537,30 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2507,6 +2742,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2521,6 +2757,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2618,13 +2855,307 @@ func (this *ResourceMetricStatus) String() string { }, "") return s } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2757,10 +3288,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2876,10 +3404,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2995,10 +3520,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3118,10 +3640,7 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3258,10 +3777,7 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3410,10 +3926,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3535,10 +4048,7 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3749,10 +4259,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3869,10 +4376,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4064,10 +4568,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4279,10 +4780,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4400,10 +4898,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4623,16 +5118,49 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ 
-4852,16 +5380,49 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5035,10 +5596,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5180,10 +5738,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5332,10 +5887,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5484,10 +6036,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5603,10 +6152,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5722,10 +6268,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5840,10 +6383,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5958,10 +6498,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if 
(iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 24dc5882e7..77a6cb379e 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v2beta2; @@ -30,6 +30,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2beta2"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // Container is the name of the container in the pods of the scaling target + optional string container = 3; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -256,8 +290,10 @@ message MetricIdentifier { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -279,6 +315,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. 
CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -290,8 +335,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -313,6 +360,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 6e5b8f68c1..ac6cb67690 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -96,8 +96,10 @@ type CrossVersionObjectReference struct { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -116,6 +118,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). 
Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -169,7 +179,7 @@ type HPAScalingRules struct { // - For scale up: 0 (i.e. no stabilization is done). // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional @@ -220,6 +230,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -263,6 +279,22 @@ type ResourceMetricSource struct { Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -382,8 +414,10 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. 
It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -402,6 +436,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -443,6 +484,20 @@ type ResourceMetricStatus struct { Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // Name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // Container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 3f38880f95..e3ea3002bd 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -27,6 +27,28 @@ package v2beta2 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", + "container": "Container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -162,12 +184,13 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. 
Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -175,12 +198,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. 
Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index ca26fe9206..81642822a6 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -25,6 +25,40 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -340,6 +374,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -376,6 +415,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 35944e7267..dc43514c6a 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -924,10 +924,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1171,10 +1168,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1291,10 +1285,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1534,10 +1525,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1750,10 +1738,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 75de45f171..7548c04dc1 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v1; @@ -151,6 +151,7 @@ message JobSpec { // JobStatus represents the current state of a Job. message JobStatus { // The latest available observations of an object's current state. + // When a job fails, one of the conditions will have type == "Failed". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -166,6 +167,7 @@ message JobStatus { // Represents time when the job was completed. 
It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. + // The completion time is only set when the job finishes successfully. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 646a3cd7ef..fd478874a1 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -131,6 +131,7 @@ type JobSpec struct { // JobStatus represents the current state of a Job. type JobStatus struct { // The latest available observations of an object's current state. + // When a job fails, one of the conditions will have type == "Failed". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -146,6 +147,7 @@ type JobStatus struct { // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. + // The completion time is only set when the job finishes successfully. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 0120e07d45..0d8003a727 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -80,9 +80,9 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "conditions": "The latest available observations of an object's current state. When a job fails, one of the conditions will have type == \"Failed\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. 
The completion time is only set when the job finishes successfully.", "active": "The number of actively running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 69c4054bfe..1d1f5f0090 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -927,10 +927,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1047,10 +1044,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1278,10 +1272,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1401,10 +1392,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1520,10 +1508,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1639,10 +1624,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 995b4f3f9d..4dab09a52c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
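The batch/v1 comment changes above clarify two points of JobStatus semantics: a failed Job surfaces a condition with type "Failed", and completionTime is only set when the Job finishes successfully. A short client-side sketch (an assumption, not part of the diff) that applies those semantics when deciding whether a Job is done:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobFinished reports whether the Job has reached a terminal state and, if so,
// whether it succeeded, based only on the documented conditions.
func jobFinished(job *batchv1.Job) (finished bool, succeeded bool) {
	for _, c := range job.Status.Conditions {
		if c.Status != corev1.ConditionTrue {
			continue
		}
		switch c.Type {
		case batchv1.JobComplete:
			return true, true // CompletionTime is expected to be set here
		case batchv1.JobFailed:
			return true, false // CompletionTime stays unset on failure
		}
	}
	return false, false
}

func main() {
	var job batchv1.Job // in practice this would be fetched via a client
	done, ok := jobFinished(&job)
	fmt.Println(done, ok)
}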
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v1beta1; diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 3e58dbb92a..16fb819cd0 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -927,10 +927,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1047,10 +1044,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1278,10 +1272,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1401,10 +1392,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1520,10 +1508,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1639,10 +1624,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 0bba13b86a..f538d50cd9 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v2alpha1; diff --git a/vendor/k8s.io/api/certificates/v1/generated.pb.go b/vendor/k8s.io/api/certificates/v1/generated.pb.go index d2cf41a25a..fca7d21154 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1/generated.pb.go @@ -989,10 +989,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1236,10 +1233,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1356,10 +1350,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1683,7 +1674,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1732,10 +1723,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1853,10 +1841,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1938,10 +1923,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 8427424a8c..839c1aa874 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.certificates.v1; diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 1729931b82..f21256f484 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -993,10 +993,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1240,10 +1237,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1360,10 +1354,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1687,7 +1678,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1737,10 +1728,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1858,10 +1846,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1943,10 +1928,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 2fb4dc4ec7..73631fb77c 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.certificates.v1beta1; diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 22c3d624e2..d5ed0f27c5 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -554,10 +554,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -674,10 +671,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -872,10 +866,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 4206746d83..4d887850db 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.coordination.v1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 57a314cfd7..bcd00d4454 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -554,10 +554,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -674,10 +671,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -872,10 +866,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index cfc2711c67..10b485e305 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
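The change repeated throughout the generated Unmarshal functions above folds the negative-skippy check and the index-overflow check into a single condition. A stand-alone sketch of that guard, with local names (advance, errInvalidLength) standing in for the generated identifiers:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// advance mirrors the consolidated guard: a negative skip length and an
// integer-overflowed index are both rejected as an invalid length before
// the position is compared against the buffer length l.
func advance(iNdEx, skippy, l int) (int, error) {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return 0, errInvalidLength
	}
	if (iNdEx + skippy) > l {
		return 0, io.ErrUnexpectedEOF
	}
	return iNdEx + skippy, nil
}

func main() {
	next, err := advance(10, 7, 100)
	fmt.Println(next, err) // 17 <nil>
}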
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.coordination.v1beta1; diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 9b29b21e57..54098aa701 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -4025,10 +4025,38 @@ func (m *PodTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo +func (m *PortStatus) Reset() { *m = PortStatus{} } +func (*PortStatus) ProtoMessage() {} +func (*PortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortStatus.Merge(m, src) +} +func (m *PortStatus) XXX_Size() int { + return m.Size() +} +func (m *PortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PortStatus proto.InternalMessageInfo + func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func 
(*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4224,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4252,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4280,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4308,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4336,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4364,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4392,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4420,7 @@ var 
xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4448,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4476,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4504,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4532,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4560,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4588,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4616,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, 
[]int{163} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4644,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4672,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4700,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4728,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4756,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4784,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4812,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4840,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return 
fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4868,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4896,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4924,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4952,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4980,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5008,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5036,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5064,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *Service) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -5064,7 +5092,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5120,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5148,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5176,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5204,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5232,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5260,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5288,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5316,7 @@ var xxx_messageInfo_ServiceStatus 
proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5344,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5372,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5400,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5428,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5456,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5484,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5512,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5540,7 @@ var 
xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5568,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5596,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5624,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5652,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5680,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5708,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5736,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5764,7 @@ var 
xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5792,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5820,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5848,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6006,6 +6034,7 @@ func init() { proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortStatus)(nil), "k8s.io.api.core.v1.PortStatus") proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") @@ -6087,876 +6116,882 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 13889 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0xd7, - 0x75, 0x18, 0xac, 0x9e, 0xc1, 0x63, 0xe6, 0xe0, 0x7d, 0xb1, 0xbb, 0xc4, 0x82, 0xbb, 0x8b, 0x65, - 0xaf, 0xb4, 0x5c, 0x8a, 0x24, 0x56, 0x7c, 0x89, 0x34, 0x49, 0xd1, 0x02, 0x30, 0xc0, 0xee, 0x70, - 0x17, 0xd8, 0xe1, 0x1d, 0xec, 0xae, 0x44, 0x53, 0xfa, 0xd4, 0x98, 0xb9, 0x00, 0x9a, 0x98, 0xe9, - 0x1e, 0x76, 0xf7, 0x60, 0x17, 0xfc, 0xe4, 0xfa, 0xfc, 0xc9, 0x4f, 0xf9, 0x91, 0x52, 0xa5, 0x5c, - 0x79, 0xd8, 0x2e, 0x57, 0xca, 0x71, 0xca, 0x56, 0x9c, 0xa4, 0xe2, 0xd8, 0xb1, 0x1d, 0xcb, 0x89, - 0x9d, 0x38, 0x0f, 0x27, 0x3f, 0x1c, 0xc7, 0x95, 0x44, 0xae, 0x72, 0x05, 0xb1, 0xd7, 0xa9, 0xb8, - 0xf4, 0x23, 0xb6, 0x13, 0x3b, 0x3f, 0x82, 0xb8, 0xe2, 0xd4, 0x7d, 0xf6, 0xbd, 0x3d, 0xdd, 0x33, - 0x83, 0x25, 0x00, 0x51, 0x2a, 0xfe, 0x9b, 0xb9, 0xe7, 0xdc, 0x73, 0x6f, 0xdf, 0xe7, 0xb9, 0xe7, - 0x09, 0xaf, 0xec, 0xbc, 0x14, 0xce, 
0xbb, 0xfe, 0xd5, 0x9d, 0xf6, 0x06, 0x09, 0x3c, 0x12, 0x91, - 0xf0, 0xea, 0x2e, 0xf1, 0xea, 0x7e, 0x70, 0x55, 0x00, 0x9c, 0x96, 0x7b, 0xb5, 0xe6, 0x07, 0xe4, - 0xea, 0xee, 0x33, 0x57, 0xb7, 0x88, 0x47, 0x02, 0x27, 0x22, 0xf5, 0xf9, 0x56, 0xe0, 0x47, 0x3e, - 0x42, 0x1c, 0x67, 0xde, 0x69, 0xb9, 0xf3, 0x14, 0x67, 0x7e, 0xf7, 0x99, 0xd9, 0xa7, 0xb7, 0xdc, - 0x68, 0xbb, 0xbd, 0x31, 0x5f, 0xf3, 0x9b, 0x57, 0xb7, 0xfc, 0x2d, 0xff, 0x2a, 0x43, 0xdd, 0x68, - 0x6f, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0x3e, 0x1f, 0x37, 0xd3, 0x74, 0x6a, 0xdb, - 0xae, 0x47, 0x82, 0xbd, 0xab, 0xad, 0x9d, 0x2d, 0xd6, 0x6e, 0x40, 0x42, 0xbf, 0x1d, 0xd4, 0x48, - 0xb2, 0xe1, 0xae, 0xb5, 0xc2, 0xab, 0x4d, 0x12, 0x39, 0x29, 0xdd, 0x9d, 0xbd, 0x9a, 0x55, 0x2b, - 0x68, 0x7b, 0x91, 0xdb, 0xec, 0x6c, 0xe6, 0xe3, 0xbd, 0x2a, 0x84, 0xb5, 0x6d, 0xd2, 0x74, 0x3a, - 0xea, 0x3d, 0x97, 0x55, 0xaf, 0x1d, 0xb9, 0x8d, 0xab, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, - 0x7f, 0xd5, 0x82, 0x8b, 0x0b, 0x77, 0xab, 0xcb, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, - 0xed, 0x54, 0x23, 0x3f, 0x20, 0x77, 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0x53, 0x50, - 0xd8, 0x65, 0xff, 0xcb, 0xa5, 0x19, 0xeb, 0xa2, 0x75, 0xa5, 0xb8, 0x38, 0xf9, 0x1b, 0xfb, 0x73, - 0x1f, 0x7a, 0xb0, 0x3f, 0x57, 0xb8, 0x23, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x61, 0x68, 0x33, 0x5c, - 0xdf, 0x6b, 0x91, 0x99, 0x1c, 0xc3, 0x1d, 0x17, 0xb8, 0x43, 0x2b, 0x55, 0x5a, 0x8a, 0x05, 0x14, - 0x5d, 0x85, 0x62, 0xcb, 0x09, 0x22, 0x37, 0x72, 0x7d, 0x6f, 0x26, 0x7f, 0xd1, 0xba, 0x32, 0xb8, - 0x38, 0x25, 0x50, 0x8b, 0x15, 0x09, 0xc0, 0x31, 0x0e, 0xed, 0x46, 0x40, 0x9c, 0xfa, 0x2d, 0xaf, - 0xb1, 0x37, 0x33, 0x70, 0xd1, 0xba, 0x52, 0x88, 0xbb, 0x81, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8f, - 0xe4, 0xa0, 0xb0, 0xb0, 0xb9, 0xe9, 0x7a, 0x6e, 0xb4, 0x87, 0xee, 0xc0, 0xa8, 0xe7, 0xd7, 0x89, - 0xfc, 0xcf, 0xbe, 0x62, 0xe4, 0xd9, 0x8b, 0xf3, 0x9d, 0x4b, 0x69, 0x7e, 0x4d, 0xc3, 0x5b, 0x9c, - 0x7c, 0xb0, 0x3f, 0x37, 0xaa, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd2, 0xf2, 0xeb, 0x8a, 0x6c, - 0x8e, 0x91, 0x9d, 0x4b, 0x23, 0x5b, 0x89, 0xd1, 0x16, 0x27, 0x1e, 0xec, 0xcf, 0x8d, 0x68, 0x05, - 0x58, 0x27, 0x82, 0x36, 0x60, 0x82, 0xfe, 0xf5, 0x22, 0x57, 0xd1, 0xcd, 0x33, 0xba, 0x97, 0xb2, - 0xe8, 0x6a, 0xa8, 0x8b, 0xd3, 0x0f, 0xf6, 0xe7, 0x26, 0x12, 0x85, 0x38, 0x49, 0xd0, 0x7e, 0x17, - 0xc6, 0x17, 0xa2, 0xc8, 0xa9, 0x6d, 0x93, 0x3a, 0x9f, 0x41, 0xf4, 0x3c, 0x0c, 0x78, 0x4e, 0x93, - 0x88, 0xf9, 0xbd, 0x28, 0x06, 0x76, 0x60, 0xcd, 0x69, 0x92, 0x83, 0xfd, 0xb9, 0xc9, 0xdb, 0x9e, - 0xfb, 0x4e, 0x5b, 0xac, 0x0a, 0x5a, 0x86, 0x19, 0x36, 0x7a, 0x16, 0xa0, 0x4e, 0x76, 0xdd, 0x1a, - 0xa9, 0x38, 0xd1, 0xb6, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x49, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x87, - 0xe2, 0xc2, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x3d, 0x44, 0x3b, 0x30, 0xd1, 0x0a, 0xc8, 0x26, 0x09, - 0x54, 0xd1, 0x8c, 0x75, 0x31, 0x7f, 0x65, 0xe4, 0xd9, 0x2b, 0xa9, 0x1f, 0x6b, 0xa2, 0x2e, 0x7b, - 0x51, 0xb0, 0xb7, 0xf8, 0x88, 0x68, 0x6f, 0x22, 0x01, 0xc5, 0x49, 0xca, 0xf6, 0x3f, 0xcf, 0xc1, - 0xe9, 0x85, 0x77, 0xdb, 0x01, 0x29, 0xb9, 0xe1, 0x4e, 0x72, 0x85, 0xd7, 0xdd, 0x70, 0x67, 0x2d, - 0x1e, 0x01, 0xb5, 0xb4, 0x4a, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x34, 0x0c, 0xd3, 0xdf, 0xb7, 0x71, - 0x59, 0x7c, 0xf2, 0xb4, 0x40, 0x1e, 0x29, 0x39, 0x91, 0x53, 0xe2, 0x20, 0x2c, 0x71, 0xd0, 0x2a, - 0x8c, 0xd4, 0xd8, 0x86, 0xdc, 0x5a, 0xf5, 0xeb, 0x84, 0x4d, 0x66, 0x71, 0xf1, 0x49, 0x8a, 0xbe, - 0x14, 0x17, 0x1f, 0xec, 0xcf, 0xcd, 0xf0, 0xbe, 0x09, 0x12, 0x1a, 0x0c, 0xeb, 0xf5, 0x91, 0xad, - 0xf6, 0xd7, 0x00, 0xa3, 0x04, 0x29, 0x7b, 0xeb, 0x8a, 0xb6, 
0x55, 0x06, 0xd9, 0x56, 0x19, 0x4d, - 0xdf, 0x26, 0xe8, 0x19, 0x18, 0xd8, 0x71, 0xbd, 0xfa, 0xcc, 0x10, 0xa3, 0x75, 0x9e, 0xce, 0xf9, - 0x0d, 0xd7, 0xab, 0x1f, 0xec, 0xcf, 0x4d, 0x19, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0x7f, 0x6a, - 0xc1, 0x1c, 0x83, 0xad, 0xb8, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, - 0x9f, 0x05, 0x08, 0x49, 0x2d, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x15, 0x04, 0x6b, 0x58, - 0xf4, 0x40, 0x08, 0xb7, 0x9d, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x40, 0xa8, 0x4a, 0x00, 0x8e, - 0x71, 0x8c, 0x03, 0x21, 0xdf, 0xeb, 0x40, 0x40, 0x9f, 0x80, 0x89, 0xb8, 0xb1, 0xb0, 0xe5, 0xd4, - 0xe4, 0x00, 0xb2, 0x2d, 0x53, 0x35, 0x41, 0x38, 0x89, 0x6b, 0xff, 0x6d, 0x4b, 0x2c, 0x1e, 0xfa, - 0xd5, 0xef, 0xf3, 0x6f, 0xb5, 0x7f, 0xc9, 0x82, 0xe1, 0x45, 0xd7, 0xab, 0xbb, 0xde, 0x16, 0xfa, - 0x1c, 0x14, 0xe8, 0xdd, 0x54, 0x77, 0x22, 0x47, 0x9c, 0x7b, 0x1f, 0xd3, 0xf6, 0x96, 0xba, 0x2a, - 0xe6, 0x5b, 0x3b, 0x5b, 0xb4, 0x20, 0x9c, 0xa7, 0xd8, 0x74, 0xb7, 0xdd, 0xda, 0x78, 0x9b, 0xd4, - 0xa2, 0x55, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x60, 0x28, 0x72, 0x82, - 0x2d, 0x12, 0x89, 0x03, 0x30, 0xf5, 0xa0, 0xe2, 0x35, 0x31, 0xdd, 0x91, 0xc4, 0xab, 0x91, 0xf8, - 0x5a, 0x58, 0x67, 0x55, 0xb1, 0x20, 0x61, 0xff, 0xd0, 0x30, 0x9c, 0x5d, 0xaa, 0x96, 0x33, 0xd6, - 0xd5, 0x65, 0x18, 0xaa, 0x07, 0xee, 0x2e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x25, 0x56, 0x8a, 0x05, - 0x14, 0xbd, 0x04, 0xa3, 0xfc, 0x42, 0xba, 0xee, 0x78, 0xf5, 0x86, 0x1c, 0xe2, 0x53, 0x02, 0x7b, - 0xf4, 0x8e, 0x06, 0xc3, 0x06, 0xe6, 0x21, 0x17, 0xd5, 0xe5, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, - 0x68, 0xc1, 0x24, 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x1d, 0x91, 0x70, 0x66, 0x90, 0x9d, - 0x74, 0x4b, 0x69, 0xa3, 0x95, 0x39, 0x02, 0xf3, 0x77, 0x12, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, - 0x9d, 0x4c, 0x82, 0x71, 0x47, 0xb3, 0xe8, 0x3b, 0x2d, 0x98, 0xad, 0xf9, 0x5e, 0x14, 0xf8, 0x8d, - 0x06, 0x09, 0x2a, 0xed, 0x8d, 0x86, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, - 0xe6, 0x50, 0x21, 0x89, 0x39, 0xbc, 0xf0, 0x60, 0x7f, 0x6e, 0x76, 0x29, 0x93, 0x14, 0xee, 0xd2, - 0x0c, 0xda, 0x01, 0x44, 0xaf, 0xd2, 0x6a, 0xe4, 0x6c, 0x91, 0xb8, 0xf1, 0xe1, 0xfe, 0x1b, 0x3f, - 0xf3, 0x60, 0x7f, 0x0e, 0xad, 0x75, 0x90, 0xc0, 0x29, 0x64, 0xd1, 0x3b, 0x70, 0x8a, 0x96, 0x76, - 0x7c, 0x6b, 0xa1, 0xff, 0xe6, 0x66, 0x1e, 0xec, 0xcf, 0x9d, 0x5a, 0x4b, 0x21, 0x82, 0x53, 0x49, - 0xa3, 0xef, 0xb0, 0xe0, 0x6c, 0xfc, 0xf9, 0xcb, 0xf7, 0x5b, 0x8e, 0x57, 0x8f, 0x1b, 0x2e, 0xf6, - 0xdf, 0x30, 0x3d, 0x93, 0xcf, 0x2e, 0x65, 0x51, 0xc2, 0xd9, 0x8d, 0xcc, 0x2e, 0xc1, 0xe9, 0xd4, - 0xd5, 0x82, 0x26, 0x21, 0xbf, 0x43, 0x38, 0x17, 0x54, 0xc4, 0xf4, 0x27, 0x3a, 0x05, 0x83, 0xbb, - 0x4e, 0xa3, 0x2d, 0x36, 0x0a, 0xe6, 0x7f, 0x5e, 0xce, 0xbd, 0x64, 0xd9, 0xff, 0x22, 0x0f, 0x13, - 0x4b, 0xd5, 0xf2, 0x43, 0xed, 0x42, 0xfd, 0x1a, 0xca, 0x75, 0xbd, 0x86, 0xe2, 0x4b, 0x2d, 0x9f, - 0x79, 0xa9, 0xfd, 0x7f, 0x29, 0x5b, 0x68, 0x80, 0x6d, 0xa1, 0x6f, 0xc9, 0xd8, 0x42, 0x47, 0xbc, - 0x71, 0x76, 0x33, 0x56, 0xd1, 0x20, 0x9b, 0xcc, 0x54, 0x8e, 0xe5, 0xa6, 0x5f, 0x73, 0x1a, 0xc9, - 0xa3, 0xef, 0x90, 0x4b, 0xe9, 0x68, 0xe6, 0xb1, 0x06, 0xa3, 0x4b, 0x4e, 0xcb, 0xd9, 0x70, 0x1b, - 0x6e, 0xe4, 0x92, 0x10, 0x3d, 0x0e, 0x79, 0xa7, 0x5e, 0x67, 0xdc, 0x56, 0x71, 0xf1, 0xf4, 0x83, - 0xfd, 0xb9, 0xfc, 0x42, 0x9d, 0x5e, 0xfb, 0xa0, 0xb0, 0xf6, 0x30, 0xc5, 0x40, 0x1f, 0x85, 0x81, - 0x7a, 0xe0, 0xb7, 0x66, 0x72, 0x0c, 0x93, 0xee, 0xba, 0x81, 0x52, 0xe0, 0xb7, 0x12, 0xa8, 0x0c, - 0xc7, 0xfe, 0xb5, 0x1c, 0x9c, 0x5b, 0x22, 0xad, 0xed, 0x95, 0x6a, 0xc6, 0xf9, 0x7d, 
0x05, 0x0a, - 0x4d, 0xdf, 0x73, 0x23, 0x3f, 0x08, 0x45, 0xd3, 0x6c, 0x45, 0xac, 0x8a, 0x32, 0xac, 0xa0, 0xe8, - 0x22, 0x0c, 0xb4, 0x62, 0xa6, 0x72, 0x54, 0x32, 0xa4, 0x8c, 0x9d, 0x64, 0x10, 0x8a, 0xd1, 0x0e, - 0x49, 0x20, 0x56, 0x8c, 0xc2, 0xb8, 0x1d, 0x92, 0x00, 0x33, 0x48, 0x7c, 0x33, 0xd3, 0x3b, 0x5b, - 0x9c, 0xd0, 0x89, 0x9b, 0x99, 0x42, 0xb0, 0x86, 0x85, 0x2a, 0x50, 0x0c, 0x13, 0x33, 0xdb, 0xd7, - 0x36, 0x1d, 0x63, 0x57, 0xb7, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0xa3, 0x0c, 0xf5, 0xbc, 0xba, 0xbf, - 0x92, 0x03, 0xc4, 0x87, 0xf0, 0x1b, 0x6c, 0xe0, 0x6e, 0x77, 0x0e, 0x5c, 0xff, 0x5b, 0xe2, 0xa8, - 0x46, 0xef, 0xcf, 0x2c, 0x38, 0xb7, 0xe4, 0x7a, 0x75, 0x12, 0x64, 0x2c, 0xc0, 0xe3, 0x79, 0xcb, - 0x1e, 0x8e, 0x69, 0x30, 0x96, 0xd8, 0xc0, 0x11, 0x2c, 0x31, 0xfb, 0x8f, 0x2d, 0x40, 0xfc, 0xb3, - 0xdf, 0x77, 0x1f, 0x7b, 0xbb, 0xf3, 0x63, 0x8f, 0x60, 0x59, 0xd8, 0x37, 0x61, 0x7c, 0xa9, 0xe1, - 0x12, 0x2f, 0x2a, 0x57, 0x96, 0x7c, 0x6f, 0xd3, 0xdd, 0x42, 0x2f, 0xc3, 0x78, 0xe4, 0x36, 0x89, - 0xdf, 0x8e, 0xaa, 0xa4, 0xe6, 0x7b, 0xec, 0x25, 0x69, 0x5d, 0x19, 0x5c, 0x44, 0x0f, 0xf6, 0xe7, - 0xc6, 0xd7, 0x0d, 0x08, 0x4e, 0x60, 0xda, 0xbf, 0x4b, 0xc7, 0xcf, 0x6f, 0xb6, 0x7c, 0x8f, 0x78, - 0xd1, 0x92, 0xef, 0xd5, 0xb9, 0xc4, 0xe1, 0x65, 0x18, 0x88, 0xe8, 0x78, 0xf0, 0xb1, 0xbb, 0x2c, - 0x37, 0x0a, 0x1d, 0x85, 0x83, 0xfd, 0xb9, 0x33, 0x9d, 0x35, 0xd8, 0x38, 0xb1, 0x3a, 0xe8, 0x5b, - 0x60, 0x28, 0x8c, 0x9c, 0xa8, 0x1d, 0x8a, 0xd1, 0x7c, 0x4c, 0x8e, 0x66, 0x95, 0x95, 0x1e, 0xec, - 0xcf, 0x4d, 0xa8, 0x6a, 0xbc, 0x08, 0x8b, 0x0a, 0xe8, 0x09, 0x18, 0x6e, 0x92, 0x30, 0x74, 0xb6, - 0xe4, 0x6d, 0x38, 0x21, 0xea, 0x0e, 0xaf, 0xf2, 0x62, 0x2c, 0xe1, 0xe8, 0x12, 0x0c, 0x92, 0x20, - 0xf0, 0x03, 0xb1, 0x47, 0xc7, 0x04, 0xe2, 0xe0, 0x32, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x5b, 0x0b, - 0x26, 0x54, 0x5f, 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0x78, 0x13, 0xa0, 0x26, 0x3f, 0x30, 0x64, 0xb7, - 0xc7, 0xc8, 0xb3, 0x97, 0x53, 0x2f, 0xea, 0x8e, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b, 0xd4, - 0xec, 0x7f, 0x6c, 0xc1, 0x74, 0xe2, 0x8b, 0x6e, 0xba, 0x61, 0x84, 0xde, 0xea, 0xf8, 0xaa, 0xf9, - 0xfe, 0xbe, 0x8a, 0xd6, 0x66, 0xdf, 0xa4, 0x96, 0xb2, 0x2c, 0xd1, 0xbe, 0xe8, 0x3a, 0x0c, 0xba, - 0x11, 0x69, 0xca, 0x8f, 0xb9, 0xd4, 0xf5, 0x63, 0x78, 0xaf, 0xe2, 0x19, 0x29, 0xd3, 0x9a, 0x98, - 0x13, 0xb0, 0x7f, 0x2d, 0x0f, 0x45, 0xbe, 0x6c, 0x57, 0x9d, 0xd6, 0x09, 0xcc, 0xc5, 0x93, 0x50, - 0x74, 0x9b, 0xcd, 0x76, 0xe4, 0x6c, 0x88, 0xe3, 0xbc, 0xc0, 0xb7, 0x56, 0x59, 0x16, 0xe2, 0x18, - 0x8e, 0xca, 0x30, 0xc0, 0xba, 0xc2, 0xbf, 0xf2, 0xf1, 0xf4, 0xaf, 0x14, 0x7d, 0x9f, 0x2f, 0x39, - 0x91, 0xc3, 0x39, 0x29, 0x75, 0x8f, 0xd0, 0x22, 0xcc, 0x48, 0x20, 0x07, 0x60, 0xc3, 0xf5, 0x9c, - 0x60, 0x8f, 0x96, 0xcd, 0xe4, 0x19, 0xc1, 0xa7, 0xbb, 0x13, 0x5c, 0x54, 0xf8, 0x9c, 0xac, 0xfa, - 0xb0, 0x18, 0x80, 0x35, 0xa2, 0xb3, 0x2f, 0x42, 0x51, 0x21, 0x1f, 0x86, 0x21, 0x9a, 0xfd, 0x04, - 0x4c, 0x24, 0xda, 0xea, 0x55, 0x7d, 0x54, 0xe7, 0xa7, 0x7e, 0x99, 0x1d, 0x19, 0xa2, 0xd7, 0xcb, - 0xde, 0xae, 0x38, 0x72, 0xdf, 0x85, 0x53, 0x8d, 0x94, 0x93, 0x4c, 0xcc, 0x6b, 0xff, 0x27, 0xdf, - 0x39, 0xf1, 0xd9, 0xa7, 0xd2, 0xa0, 0x38, 0xb5, 0x0d, 0xca, 0x23, 0xf8, 0x2d, 0xba, 0x41, 0x9c, - 0x86, 0xce, 0x6e, 0xdf, 0x12, 0x65, 0x58, 0x41, 0xe9, 0x79, 0x77, 0x4a, 0x75, 0xfe, 0x06, 0xd9, - 0xab, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x75, 0xed, 0xfe, 0x79, 0x3e, 0xfa, 0xfc, 0xb8, 0x1c, - 0x11, 0x04, 0xf2, 0x37, 0xc8, 0x1e, 0x9f, 0x0a, 0xfd, 0xeb, 0xf2, 0x5d, 0xbf, 0xee, 0x67, 0x2d, - 0x18, 0x53, 0x5f, 0x77, 0x02, 0xe7, 0xc2, 0xa2, 0x79, 0x2e, 0x9c, 0xef, 0xba, 0xc0, 0x33, 0x4e, - 0x84, 0xaf, 
0xe4, 0xe0, 0xac, 0xc2, 0xa1, 0x6f, 0x03, 0xfe, 0x47, 0xac, 0xaa, 0xab, 0x50, 0xf4, - 0x94, 0xd4, 0xca, 0x32, 0xc5, 0x45, 0xb1, 0xcc, 0x2a, 0xc6, 0xa1, 0x2c, 0x9e, 0x17, 0x8b, 0x96, - 0x46, 0x75, 0x71, 0xae, 0x10, 0xdd, 0x2e, 0x42, 0xbe, 0xed, 0xd6, 0xc5, 0x05, 0xf3, 0x31, 0x39, - 0xda, 0xb7, 0xcb, 0xa5, 0x83, 0xfd, 0xb9, 0xc7, 0xb2, 0x54, 0x09, 0xf4, 0x66, 0x0b, 0xe7, 0x6f, - 0x97, 0x4b, 0x98, 0x56, 0x46, 0x0b, 0x30, 0x21, 0xb5, 0x25, 0x77, 0x28, 0xbb, 0xe5, 0x7b, 0xe2, - 0x1e, 0x52, 0x32, 0x59, 0x6c, 0x82, 0x71, 0x12, 0x1f, 0x95, 0x60, 0x72, 0xa7, 0xbd, 0x41, 0x1a, - 0x24, 0xe2, 0x1f, 0x7c, 0x83, 0x70, 0x89, 0x65, 0x31, 0x7e, 0x99, 0xdd, 0x48, 0xc0, 0x71, 0x47, - 0x0d, 0xfb, 0x2f, 0xd8, 0x7d, 0x20, 0x46, 0xaf, 0x12, 0xf8, 0x74, 0x61, 0x51, 0xea, 0x5f, 0xcf, - 0xe5, 0xdc, 0xcf, 0xaa, 0xb8, 0x41, 0xf6, 0xd6, 0x7d, 0xca, 0x99, 0xa7, 0xaf, 0x0a, 0x63, 0xcd, - 0x0f, 0x74, 0x5d, 0xf3, 0x3f, 0x9f, 0x83, 0xd3, 0x6a, 0x04, 0x0c, 0x26, 0xf0, 0x1b, 0x7d, 0x0c, - 0x9e, 0x81, 0x91, 0x3a, 0xd9, 0x74, 0xda, 0x8d, 0x48, 0x89, 0xcf, 0x07, 0xb9, 0x0a, 0xa5, 0x14, - 0x17, 0x63, 0x1d, 0xe7, 0x10, 0xc3, 0xf6, 0x3f, 0x47, 0xd8, 0x45, 0x1c, 0x39, 0x74, 0x8d, 0xab, - 0x5d, 0x63, 0x65, 0xee, 0x9a, 0x4b, 0x30, 0xe8, 0x36, 0x29, 0x63, 0x96, 0x33, 0xf9, 0xad, 0x32, - 0x2d, 0xc4, 0x1c, 0x86, 0x3e, 0x02, 0xc3, 0x35, 0xbf, 0xd9, 0x74, 0xbc, 0x3a, 0xbb, 0xf2, 0x8a, - 0x8b, 0x23, 0x94, 0x77, 0x5b, 0xe2, 0x45, 0x58, 0xc2, 0xd0, 0x39, 0x18, 0x70, 0x82, 0x2d, 0x2e, - 0xc3, 0x28, 0x2e, 0x16, 0x68, 0x4b, 0x0b, 0xc1, 0x56, 0x88, 0x59, 0x29, 0x7d, 0x82, 0xdd, 0xf3, - 0x83, 0x1d, 0xd7, 0xdb, 0x2a, 0xb9, 0x81, 0xd8, 0x12, 0xea, 0x2e, 0xbc, 0xab, 0x20, 0x58, 0xc3, - 0x42, 0x2b, 0x30, 0xd8, 0xf2, 0x83, 0x28, 0x9c, 0x19, 0x62, 0xc3, 0xfd, 0x58, 0xc6, 0x41, 0xc4, - 0xbf, 0xb6, 0xe2, 0x07, 0x51, 0xfc, 0x01, 0xf4, 0x5f, 0x88, 0x79, 0x75, 0x74, 0x13, 0x86, 0x89, - 0xb7, 0xbb, 0x12, 0xf8, 0xcd, 0x99, 0xe9, 0x6c, 0x4a, 0xcb, 0x1c, 0x85, 0x2f, 0xb3, 0x98, 0x47, - 0x15, 0xc5, 0x58, 0x92, 0x40, 0xdf, 0x02, 0x79, 0xe2, 0xed, 0xce, 0x0c, 0x33, 0x4a, 0xb3, 0x19, - 0x94, 0xee, 0x38, 0x41, 0x7c, 0xe6, 0x2f, 0x7b, 0xbb, 0x98, 0xd6, 0x41, 0x9f, 0x86, 0xa2, 0x3c, - 0x30, 0x42, 0x21, 0xac, 0x4b, 0x5d, 0xb0, 0xf2, 0x98, 0xc1, 0xe4, 0x9d, 0xb6, 0x1b, 0x90, 0x26, - 0xf1, 0xa2, 0x30, 0x3e, 0x21, 0x25, 0x34, 0xc4, 0x31, 0x35, 0xf4, 0x69, 0x29, 0x21, 0x5e, 0xf5, - 0xdb, 0x5e, 0x14, 0xce, 0x14, 0x59, 0xf7, 0x52, 0x75, 0x77, 0x77, 0x62, 0xbc, 0xa4, 0x08, 0x99, - 0x57, 0xc6, 0x06, 0x29, 0xf4, 0x19, 0x18, 0xe3, 0xff, 0xb9, 0x06, 0x2c, 0x9c, 0x39, 0xcd, 0x68, - 0x5f, 0xcc, 0xa6, 0xcd, 0x11, 0x17, 0x4f, 0x0b, 0xe2, 0x63, 0x7a, 0x69, 0x88, 0x4d, 0x6a, 0x08, - 0xc3, 0x58, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0x6f, 0x90, 0x19, 0x60, 0x03, 0x73, - 0x36, 0x5d, 0x63, 0xe6, 0x6f, 0x90, 0xc5, 0x29, 0x4a, 0xf3, 0xa6, 0x5e, 0x07, 0x9b, 0x24, 0xd0, - 0x6d, 0x18, 0xa7, 0x2f, 0x36, 0x37, 0x26, 0x3a, 0xd2, 0x8b, 0x28, 0x7b, 0x57, 0x61, 0xa3, 0x12, - 0x4e, 0x10, 0x41, 0xb7, 0x60, 0x34, 0x8c, 0x9c, 0x20, 0x6a, 0xb7, 0x38, 0xd1, 0x33, 0xbd, 0x88, - 0x32, 0x85, 0x6b, 0x55, 0xab, 0x82, 0x0d, 0x02, 0xe8, 0x75, 0x28, 0x36, 0xdc, 0x4d, 0x52, 0xdb, - 0xab, 0x35, 0xc8, 0xcc, 0x28, 0xa3, 0x96, 0x7a, 0xa8, 0xdc, 0x94, 0x48, 0x9c, 0xcf, 0x55, 0x7f, - 0x71, 0x5c, 0x1d, 0xdd, 0x81, 0x33, 0x11, 0x09, 0x9a, 0xae, 0xe7, 0xd0, 0xc3, 0x40, 0x3c, 0xad, - 0x98, 0x22, 0x73, 0x8c, 0xed, 0xb6, 0x0b, 0x62, 0x36, 0xce, 0xac, 0xa7, 0x62, 0xe1, 0x8c, 0xda, - 0xe8, 0x3e, 0xcc, 0xa4, 0x40, 0xfc, 0x86, 0x5b, 0xdb, 0x9b, 0x39, 0xc5, 0x28, 0xbf, 0x2a, 0x28, - 0xcf, 0xac, 0x67, 0xe0, 0x1d, 0x74, 
0x81, 0xe1, 0x4c, 0xea, 0xe8, 0x16, 0x4c, 0xb0, 0x13, 0xa8, - 0xd2, 0x6e, 0x34, 0x44, 0x83, 0xe3, 0xac, 0xc1, 0x8f, 0xc8, 0xfb, 0xb8, 0x6c, 0x82, 0x0f, 0xf6, - 0xe7, 0x20, 0xfe, 0x87, 0x93, 0xb5, 0xd1, 0x06, 0xd3, 0x99, 0xb5, 0x03, 0x37, 0xda, 0xa3, 0xe7, - 0x06, 0xb9, 0x1f, 0xcd, 0x4c, 0x74, 0x95, 0x57, 0xe8, 0xa8, 0x4a, 0xb1, 0xa6, 0x17, 0xe2, 0x24, - 0x41, 0x7a, 0xa4, 0x86, 0x51, 0xdd, 0xf5, 0x66, 0x26, 0xf9, 0xbb, 0x44, 0x9e, 0x48, 0x55, 0x5a, - 0x88, 0x39, 0x8c, 0xe9, 0xcb, 0xe8, 0x8f, 0x5b, 0xf4, 0xe6, 0x9a, 0x62, 0x88, 0xb1, 0xbe, 0x4c, - 0x02, 0x70, 0x8c, 0x43, 0x99, 0xc9, 0x28, 0xda, 0x9b, 0x41, 0x0c, 0x55, 0x1d, 0x2c, 0xeb, 0xeb, - 0x9f, 0xc6, 0xb4, 0xdc, 0xde, 0x80, 0x71, 0x75, 0x10, 0xb2, 0x31, 0x41, 0x73, 0x30, 0xc8, 0xd8, - 0x27, 0x21, 0x5d, 0x2b, 0xd2, 0x2e, 0x30, 0xd6, 0x0a, 0xf3, 0x72, 0xd6, 0x05, 0xf7, 0x5d, 0xb2, - 0xb8, 0x17, 0x11, 0xfe, 0xa6, 0xcf, 0x6b, 0x5d, 0x90, 0x00, 0x1c, 0xe3, 0xd8, 0xff, 0x87, 0xb3, - 0xa1, 0xf1, 0x69, 0xdb, 0xc7, 0xfd, 0xf2, 0x14, 0x14, 0xb6, 0xfd, 0x30, 0xa2, 0xd8, 0xac, 0x8d, - 0xc1, 0x98, 0xf1, 0xbc, 0x2e, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc0, 0x58, 0x4d, 0x6f, 0x40, 0x5c, - 0x8e, 0xea, 0x18, 0x31, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x09, 0x0a, 0xcc, 0x06, 0xa4, 0xe6, 0x37, - 0x04, 0xd7, 0x26, 0x6f, 0xf8, 0x42, 0x45, 0x94, 0x1f, 0x68, 0xbf, 0xb1, 0xc2, 0x46, 0x97, 0x61, - 0x88, 0x76, 0xa1, 0x5c, 0x11, 0xd7, 0x92, 0x12, 0x14, 0x5d, 0x67, 0xa5, 0x58, 0x40, 0xed, 0xbf, - 0x9c, 0xd3, 0x46, 0x99, 0xbe, 0x87, 0x09, 0xaa, 0xc0, 0xf0, 0x3d, 0xc7, 0x8d, 0x5c, 0x6f, 0x4b, - 0xf0, 0x1f, 0x4f, 0x74, 0xbd, 0xa3, 0x58, 0xa5, 0xbb, 0xbc, 0x02, 0xbf, 0x45, 0xc5, 0x1f, 0x2c, - 0xc9, 0x50, 0x8a, 0x41, 0xdb, 0xf3, 0x28, 0xc5, 0x5c, 0xbf, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, - 0x3f, 0x58, 0x92, 0x41, 0x6f, 0x01, 0xc8, 0x1d, 0x46, 0xea, 0xc2, 0xf6, 0xe2, 0xa9, 0xde, 0x44, - 0xd7, 0x55, 0x9d, 0xc5, 0x71, 0x7a, 0x47, 0xc7, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x18, 0x9f, 0xd6, - 0xd9, 0x19, 0xf4, 0x6d, 0x74, 0x89, 0x3b, 0x41, 0x44, 0xea, 0x0b, 0x91, 0x18, 0x9c, 0x8f, 0xf6, - 0xf7, 0x48, 0x59, 0x77, 0x9b, 0x44, 0xdf, 0x0e, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x98, 0x87, - 0x99, 0xac, 0xee, 0xd2, 0x45, 0x47, 0xee, 0xbb, 0xd1, 0x12, 0x65, 0xaf, 0x2c, 0x73, 0xd1, 0x2d, - 0x8b, 0x72, 0xac, 0x30, 0xe8, 0xec, 0x87, 0xee, 0x96, 0x7c, 0x63, 0x0e, 0xc6, 0xb3, 0x5f, 0x65, - 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x40, 0x9c, 0x50, 0x18, 0xf7, 0x68, 0xab, 0x04, 0xb3, 0x52, 0x2c, - 0xa0, 0xba, 0xb4, 0x6b, 0xa0, 0x87, 0xb4, 0xcb, 0x18, 0xa2, 0xc1, 0xa3, 0x1d, 0x22, 0xf4, 0x59, - 0x80, 0x4d, 0xd7, 0x73, 0xc3, 0x6d, 0x46, 0x7d, 0xe8, 0xd0, 0xd4, 0x15, 0x73, 0xb6, 0xa2, 0xa8, - 0x60, 0x8d, 0x22, 0x7a, 0x01, 0x46, 0xd4, 0x06, 0x2c, 0x97, 0x98, 0xa6, 0x53, 0xb3, 0x1c, 0x89, - 0x4f, 0xa3, 0x12, 0xd6, 0xf1, 0xec, 0xb7, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x7e, - 0xc7, 0x37, 0xd7, 0x7d, 0x7c, 0xed, 0xaf, 0xe5, 0x61, 0xc2, 0x68, 0xac, 0x1d, 0xf6, 0x71, 0x66, - 0x5d, 0xa3, 0x07, 0xb8, 0x13, 0x11, 0xb1, 0xff, 0xec, 0xde, 0x5b, 0x45, 0x3f, 0xe4, 0xe9, 0x0e, - 0xe0, 0xf5, 0xd1, 0x67, 0xa1, 0xd8, 0x70, 0x42, 0x26, 0x39, 0x23, 0x62, 0xdf, 0xf5, 0x43, 0x2c, - 0x7e, 0x98, 0x38, 0x61, 0xa4, 0xdd, 0x9a, 0x9c, 0x76, 0x4c, 0x92, 0xde, 0x34, 0x94, 0x3f, 0x91, - 0xd6, 0x63, 0xaa, 0x13, 0x94, 0x89, 0xd9, 0xc3, 0x1c, 0x86, 0x5e, 0x82, 0xd1, 0x80, 0xb0, 0x55, - 0xb1, 0x44, 0xb9, 0x39, 0xb6, 0xcc, 0x06, 0x63, 0xb6, 0x0f, 0x6b, 0x30, 0x6c, 0x60, 0xc6, 0x6f, - 0x83, 0xa1, 0x2e, 0x6f, 0x83, 0x27, 0x60, 0x98, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0x46, 0x99, 0x17, - 0x63, 0x09, 0x4f, 0x2e, 0x98, 0x42, 0x7f, 0x0b, 0x86, 0xbe, 
0x3e, 0xc4, 0xa2, 0x66, 0x5a, 0xe6, - 0x02, 0x3f, 0xe5, 0xc4, 0x92, 0xc7, 0x12, 0x66, 0x7f, 0x14, 0xc6, 0x4b, 0x0e, 0x69, 0xfa, 0xde, - 0xb2, 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x19, 0x18, 0x60, 0x97, 0x08, 0x3f, 0x02, 0x06, 0x68, - 0x43, 0x78, 0x80, 0x3e, 0x08, 0xec, 0x2d, 0x38, 0x5d, 0xf2, 0xef, 0x79, 0xf7, 0x9c, 0xa0, 0xbe, - 0x50, 0x29, 0x6b, 0xef, 0xeb, 0x35, 0xf9, 0xbe, 0xe3, 0x46, 0x5b, 0xa9, 0x47, 0xaf, 0x56, 0x93, - 0xb3, 0xb5, 0x2b, 0x6e, 0x83, 0x64, 0x48, 0x41, 0xfe, 0x6a, 0xce, 0x68, 0x29, 0xc6, 0x57, 0x5a, - 0x2d, 0x2b, 0x53, 0xab, 0xf5, 0x06, 0x14, 0x36, 0x5d, 0xd2, 0xa8, 0x63, 0xb2, 0x29, 0x56, 0xe2, - 0xe3, 0xd9, 0x76, 0x28, 0x2b, 0x14, 0x53, 0x4a, 0xbd, 0xf8, 0xeb, 0x70, 0x45, 0x54, 0xc6, 0x8a, - 0x0c, 0xda, 0x81, 0x49, 0xf9, 0x60, 0x90, 0x50, 0xb1, 0x2e, 0x9f, 0xe8, 0xf6, 0x0a, 0x31, 0x89, - 0x9f, 0x7a, 0xb0, 0x3f, 0x37, 0x89, 0x13, 0x64, 0x70, 0x07, 0x61, 0xfa, 0x1c, 0x6c, 0xd2, 0x13, - 0x78, 0x80, 0x0d, 0x3f, 0x7b, 0x0e, 0xb2, 0x97, 0x2d, 0x2b, 0xb5, 0x7f, 0xcc, 0x82, 0x47, 0x3a, - 0x46, 0x46, 0xbc, 0xf0, 0x8f, 0x78, 0x16, 0x92, 0x2f, 0xee, 0x5c, 0xef, 0x17, 0xb7, 0xfd, 0x77, - 0x2c, 0x38, 0xb5, 0xdc, 0x6c, 0x45, 0x7b, 0x25, 0xd7, 0x54, 0x41, 0xbd, 0x08, 0x43, 0x4d, 0x52, - 0x77, 0xdb, 0x4d, 0x31, 0x73, 0x73, 0xf2, 0x94, 0x5a, 0x65, 0xa5, 0x07, 0xfb, 0x73, 0x63, 0xd5, - 0xc8, 0x0f, 0x9c, 0x2d, 0xc2, 0x0b, 0xb0, 0x40, 0x67, 0x67, 0xbd, 0xfb, 0x2e, 0xb9, 0xe9, 0x36, - 0x5d, 0x69, 0x57, 0xd4, 0x55, 0x66, 0x37, 0x2f, 0x07, 0x74, 0xfe, 0x8d, 0xb6, 0xe3, 0x45, 0x6e, - 0xb4, 0x27, 0xb4, 0x47, 0x92, 0x08, 0x8e, 0xe9, 0xd9, 0x5f, 0xb5, 0x60, 0x42, 0xae, 0xfb, 0x85, - 0x7a, 0x3d, 0x20, 0x61, 0x88, 0x66, 0x21, 0xe7, 0xb6, 0x44, 0x2f, 0x41, 0xf4, 0x32, 0x57, 0xae, - 0xe0, 0x9c, 0xdb, 0x92, 0x6c, 0x19, 0x3b, 0x08, 0xf3, 0xa6, 0x22, 0xed, 0xba, 0x28, 0xc7, 0x0a, - 0x03, 0x5d, 0x81, 0x82, 0xe7, 0xd7, 0xb9, 0x6d, 0x17, 0xbf, 0xd2, 0xd8, 0x02, 0x5b, 0x13, 0x65, - 0x58, 0x41, 0x51, 0x05, 0x8a, 0xdc, 0xec, 0x29, 0x5e, 0xb4, 0x7d, 0x19, 0x4f, 0xb1, 0x2f, 0x5b, - 0x97, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0x55, 0x0b, 0x46, 0xe5, 0x97, 0xf5, 0xc9, 0x73, 0xd2, 0xad, - 0x15, 0xf3, 0x9b, 0xf1, 0xd6, 0xa2, 0x3c, 0x23, 0x83, 0x18, 0xac, 0x62, 0xfe, 0x50, 0xac, 0xe2, - 0x33, 0x30, 0xe2, 0xb4, 0x5a, 0x15, 0x93, 0xcf, 0x64, 0x4b, 0x69, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, - 0xfd, 0xa3, 0x39, 0x18, 0x97, 0x5f, 0x50, 0x6d, 0x6f, 0x84, 0x24, 0x42, 0xeb, 0x50, 0x74, 0xf8, - 0x2c, 0x11, 0xb9, 0xc8, 0x2f, 0xa5, 0xcb, 0x11, 0x8c, 0x29, 0x8d, 0x2f, 0xfc, 0x05, 0x59, 0x1b, - 0xc7, 0x84, 0x50, 0x03, 0xa6, 0x3c, 0x3f, 0x62, 0x87, 0xbf, 0x82, 0x77, 0x53, 0xed, 0x24, 0xa9, - 0x9f, 0x15, 0xd4, 0xa7, 0xd6, 0x92, 0x54, 0x70, 0x27, 0x61, 0xb4, 0x2c, 0x65, 0x33, 0xf9, 0x6c, - 0x61, 0x80, 0x3e, 0x71, 0xe9, 0xa2, 0x19, 0xfb, 0x57, 0x2c, 0x28, 0x4a, 0xb4, 0x93, 0xd0, 0xe2, - 0xad, 0xc2, 0x70, 0xc8, 0x26, 0x41, 0x0e, 0x8d, 0xdd, 0xad, 0xe3, 0x7c, 0xbe, 0xe2, 0x3b, 0x8d, - 0xff, 0x0f, 0xb1, 0xa4, 0xc1, 0x44, 0xf3, 0xaa, 0xfb, 0xef, 0x13, 0xd1, 0xbc, 0xea, 0x4f, 0xc6, - 0xa5, 0xf4, 0x87, 0xac, 0xcf, 0x9a, 0xac, 0x8b, 0xb2, 0x5e, 0xad, 0x80, 0x6c, 0xba, 0xf7, 0x93, - 0xac, 0x57, 0x85, 0x95, 0x62, 0x01, 0x45, 0x6f, 0xc1, 0x68, 0x4d, 0xca, 0x64, 0xe3, 0x1d, 0x7e, - 0xb9, 0xab, 0x7e, 0x40, 0xa9, 0x92, 0xb8, 0x2c, 0x64, 0x49, 0xab, 0x8f, 0x0d, 0x6a, 0xa6, 0x19, - 0x41, 0xbe, 0x97, 0x19, 0x41, 0x4c, 0x37, 0x5b, 0xa9, 0xfe, 0xe3, 0x16, 0x0c, 0x71, 0x59, 0x5c, - 0x7f, 0xa2, 0x50, 0x4d, 0xb3, 0x16, 0x8f, 0xdd, 0x1d, 0x5a, 0x28, 0x34, 0x65, 0x68, 0x15, 0x8a, - 0xec, 0x07, 0x93, 0x25, 0xe6, 0xb3, 0xad, 0xee, 0x79, 0xab, 0x7a, 0x07, 0xef, 0xc8, 
0x6a, 0x38, - 0xa6, 0x60, 0xff, 0x70, 0x9e, 0x9e, 0x6e, 0x31, 0xaa, 0x71, 0xe9, 0x5b, 0xc7, 0x77, 0xe9, 0xe7, - 0x8e, 0xeb, 0xd2, 0xdf, 0x82, 0x89, 0x9a, 0xa6, 0x87, 0x8b, 0x67, 0xf2, 0x4a, 0xd7, 0x45, 0xa2, - 0xa9, 0xec, 0xb8, 0x94, 0x65, 0xc9, 0x24, 0x82, 0x93, 0x54, 0xd1, 0xb7, 0xc1, 0x28, 0x9f, 0x67, - 0xd1, 0x0a, 0xb7, 0xc4, 0xf8, 0x48, 0xf6, 0x7a, 0xd1, 0x9b, 0xe0, 0x52, 0x39, 0xad, 0x3a, 0x36, - 0x88, 0xd9, 0x7f, 0x62, 0x01, 0x5a, 0x6e, 0x6d, 0x93, 0x26, 0x09, 0x9c, 0x46, 0x2c, 0x4e, 0xff, - 0x7e, 0x0b, 0x66, 0x48, 0x47, 0xf1, 0x92, 0xdf, 0x6c, 0x8a, 0x47, 0x4b, 0xc6, 0xbb, 0x7a, 0x39, - 0xa3, 0x8e, 0x72, 0x4b, 0x98, 0xc9, 0xc2, 0xc0, 0x99, 0xed, 0xa1, 0x55, 0x98, 0xe6, 0xb7, 0xa4, - 0x02, 0x68, 0xb6, 0xd7, 0x8f, 0x0a, 0xc2, 0xd3, 0xeb, 0x9d, 0x28, 0x38, 0xad, 0x9e, 0xfd, 0x5d, - 0xa3, 0x90, 0xd9, 0x8b, 0x0f, 0xf4, 0x08, 0x1f, 0xe8, 0x11, 0x3e, 0xd0, 0x23, 0x7c, 0xa0, 0x47, - 0xf8, 0x40, 0x8f, 0xf0, 0x4d, 0xaf, 0x47, 0xf8, 0x23, 0x0b, 0xa6, 0x3b, 0xaf, 0x81, 0x93, 0x60, - 0xcc, 0xdb, 0x30, 0xdd, 0x79, 0xd7, 0x75, 0xb5, 0xb3, 0xeb, 0xec, 0x67, 0x7c, 0xef, 0xa5, 0x7c, - 0x03, 0x4e, 0xa3, 0x6f, 0xff, 0xba, 0x05, 0xa7, 0x15, 0xb2, 0xf1, 0xd2, 0xff, 0x3c, 0x4c, 0xf3, - 0xf3, 0x65, 0xa9, 0xe1, 0xb8, 0xcd, 0x75, 0xd2, 0x6c, 0x35, 0x9c, 0x48, 0x9a, 0x19, 0x3c, 0x93, - 0xba, 0x55, 0x13, 0x26, 0xba, 0x46, 0xc5, 0xc5, 0x47, 0x68, 0xbf, 0x52, 0x00, 0x38, 0xad, 0x19, - 0xc3, 0x28, 0x35, 0xd7, 0xd3, 0x4c, 0xf8, 0x17, 0x0b, 0x30, 0xb8, 0xbc, 0x4b, 0xbc, 0xe8, 0x04, - 0x26, 0xaa, 0x06, 0xe3, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, 0xc3, 0x0f, 0xf3, 0xd0, 0x3f, - 0x23, 0x48, 0x8f, 0x97, 0x0d, 0x12, 0x38, 0x41, 0xf2, 0x38, 0x84, 0xed, 0xd7, 0x60, 0x88, 0xdf, - 0x71, 0x42, 0xd2, 0x9e, 0x7a, 0xa5, 0xb1, 0x41, 0x14, 0x37, 0x77, 0xac, 0x08, 0xe0, 0x77, 0xa8, - 0xa8, 0x8e, 0xde, 0x86, 0xf1, 0x4d, 0x37, 0x08, 0xa3, 0x75, 0xb7, 0x49, 0xc2, 0xc8, 0x69, 0xb6, - 0x1e, 0x42, 0xb8, 0xae, 0xc6, 0x61, 0xc5, 0xa0, 0x84, 0x13, 0x94, 0xd1, 0x16, 0x8c, 0x35, 0x1c, - 0xbd, 0xa9, 0xe1, 0x43, 0x37, 0xa5, 0x2e, 0xcf, 0x9b, 0x3a, 0x21, 0x6c, 0xd2, 0xa5, 0xa7, 0x4d, - 0x8d, 0xc9, 0x87, 0x0b, 0x4c, 0x6a, 0xa2, 0x4e, 0x1b, 0x2e, 0x18, 0xe6, 0x30, 0xca, 0x07, 0x32, - 0xfb, 0xe1, 0xa2, 0xc9, 0x07, 0x6a, 0x56, 0xc2, 0x9f, 0x83, 0x22, 0xa1, 0x43, 0x48, 0x09, 0x8b, - 0xfb, 0xf7, 0x6a, 0x7f, 0x7d, 0x5d, 0x75, 0x6b, 0x81, 0x6f, 0xaa, 0x35, 0x96, 0x25, 0x25, 0x1c, - 0x13, 0x45, 0x4b, 0x30, 0x14, 0x92, 0xc0, 0x25, 0xa1, 0xb8, 0x89, 0xbb, 0x4c, 0x23, 0x43, 0xe3, - 0xae, 0x37, 0xfc, 0x37, 0x16, 0x55, 0xe9, 0xf2, 0x72, 0x98, 0xc4, 0x97, 0xdd, 0x95, 0xda, 0xf2, - 0x5a, 0x60, 0xa5, 0x58, 0x40, 0xd1, 0xeb, 0x30, 0x1c, 0x90, 0x06, 0xd3, 0x9b, 0x8d, 0xf5, 0xbf, - 0xc8, 0xb9, 0x1a, 0x8e, 0xd7, 0xc3, 0x92, 0x00, 0xba, 0x01, 0x28, 0x20, 0x94, 0x8f, 0x74, 0xbd, - 0x2d, 0x65, 0x55, 0x2b, 0xee, 0x21, 0x75, 0x6e, 0xe1, 0x18, 0x43, 0x7a, 0x41, 0xe1, 0x94, 0x6a, - 0xe8, 0x1a, 0x4c, 0xa9, 0xd2, 0xb2, 0x17, 0x46, 0x0e, 0x3d, 0xff, 0x27, 0x18, 0x2d, 0x25, 0xc6, - 0xc1, 0x49, 0x04, 0xdc, 0x59, 0xc7, 0xfe, 0xb2, 0x05, 0x7c, 0x9c, 0x4f, 0x40, 0x78, 0xf1, 0x9a, - 0x29, 0xbc, 0x38, 0x9b, 0x39, 0x73, 0x19, 0x82, 0x8b, 0x2f, 0x5b, 0x30, 0xa2, 0xcd, 0x6c, 0xbc, - 0x66, 0xad, 0x2e, 0x6b, 0xb6, 0x0d, 0x93, 0x74, 0xa5, 0xdf, 0xda, 0x08, 0x49, 0xb0, 0x4b, 0xea, - 0x6c, 0x61, 0xe6, 0x1e, 0x6e, 0x61, 0x2a, 0x0b, 0xbe, 0x9b, 0x09, 0x82, 0xb8, 0xa3, 0x09, 0xfb, - 0x73, 0xb2, 0xab, 0xca, 0xe0, 0xb1, 0xa6, 0xe6, 0x3c, 0x61, 0xf0, 0xa8, 0x66, 0x15, 0xc7, 0x38, - 0x74, 0xab, 0x6d, 0xfb, 0x61, 0x94, 0x34, 0x78, 0xbc, 0xee, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x39, - 0x80, 0xe5, 
0xfb, 0xa4, 0xc6, 0x57, 0xac, 0xfe, 0xb6, 0xb2, 0xb2, 0xdf, 0x56, 0xf6, 0x6f, 0x5b, - 0x30, 0xbe, 0xb2, 0x64, 0xdc, 0x73, 0xf3, 0x00, 0xfc, 0x41, 0x78, 0xf7, 0xee, 0x9a, 0xb4, 0x16, - 0xe0, 0x0a, 0x5f, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x16, 0xf2, 0x8d, 0xb6, 0x27, 0xa4, 0xab, 0xc3, - 0x94, 0x7b, 0xb8, 0xd9, 0xf6, 0x30, 0x2d, 0xd3, 0x3c, 0x2e, 0xf2, 0x7d, 0x7b, 0x5c, 0xf4, 0x8c, - 0x7c, 0x80, 0xe6, 0x60, 0xf0, 0xde, 0x3d, 0xb7, 0xce, 0xfd, 0x4b, 0x85, 0x25, 0xc3, 0xdd, 0xbb, - 0xe5, 0x52, 0x88, 0x79, 0xb9, 0xfd, 0xa5, 0x3c, 0xcc, 0xae, 0x34, 0xc8, 0xfd, 0xf7, 0xe8, 0x63, - 0xdb, 0xaf, 0xbf, 0xc8, 0xe1, 0xe4, 0x54, 0x87, 0xf5, 0x09, 0xea, 0x3d, 0x1e, 0x9b, 0x30, 0xcc, - 0xed, 0xfd, 0xa4, 0xc7, 0xed, 0x2b, 0x69, 0xad, 0x67, 0x0f, 0xc8, 0x3c, 0xb7, 0x1b, 0x14, 0x0e, - 0x83, 0xea, 0xc2, 0x14, 0xa5, 0x58, 0x12, 0x9f, 0x7d, 0x19, 0x46, 0x75, 0xcc, 0x43, 0x79, 0xe7, - 0xfd, 0xff, 0x79, 0x98, 0xa4, 0x3d, 0x38, 0xd6, 0x89, 0xb8, 0xdd, 0x39, 0x11, 0x47, 0xed, 0xa1, - 0xd5, 0x7b, 0x36, 0xde, 0x4a, 0xce, 0xc6, 0x33, 0x59, 0xb3, 0x71, 0xd2, 0x73, 0xf0, 0x9d, 0x16, - 0x4c, 0xaf, 0x34, 0xfc, 0xda, 0x4e, 0xc2, 0x8b, 0xea, 0x05, 0x18, 0xa1, 0xc7, 0x71, 0x68, 0x38, - 0xf8, 0x1b, 0x21, 0x1f, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xdb, 0xb7, 0xcb, 0xa5, 0xb4, 0x48, - 0x11, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0x37, 0x2d, 0x38, 0x7f, 0x6d, 0x69, 0x39, 0x5e, 0x8a, 0x1d, - 0xc1, 0x2a, 0x2e, 0xc3, 0x50, 0xab, 0xae, 0x75, 0x25, 0x96, 0x3e, 0x97, 0x58, 0x2f, 0x04, 0xf4, - 0xfd, 0x12, 0x88, 0xe5, 0xa7, 0x2d, 0x98, 0xbe, 0xe6, 0x46, 0xf4, 0x76, 0x4d, 0x86, 0x4d, 0xa0, - 0xd7, 0x6b, 0xe8, 0x46, 0x7e, 0xb0, 0x97, 0x0c, 0x9b, 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, - 0x5d, 0x97, 0x59, 0x9a, 0xe7, 0x4c, 0x3d, 0x1c, 0x16, 0xe5, 0x58, 0x61, 0xd0, 0x0f, 0xab, 0xbb, - 0x01, 0x13, 0x61, 0xee, 0x89, 0x13, 0x56, 0x7d, 0x58, 0x49, 0x02, 0x70, 0x8c, 0x43, 0x5f, 0x73, - 0x73, 0xd7, 0x1a, 0xed, 0x30, 0x22, 0xc1, 0x66, 0x98, 0x71, 0x3a, 0x3e, 0x07, 0x45, 0x22, 0x15, - 0x06, 0xa2, 0xd7, 0x8a, 0x63, 0x54, 0x9a, 0x04, 0x1e, 0xbd, 0x41, 0xe1, 0xf5, 0xe1, 0x93, 0x79, - 0x38, 0xa7, 0xba, 0x15, 0x40, 0x44, 0x6f, 0x4b, 0x0f, 0x67, 0xc1, 0xfc, 0xe2, 0x97, 0x3b, 0xa0, - 0x38, 0xa5, 0x86, 0xfd, 0x63, 0x16, 0x9c, 0x56, 0x1f, 0xfc, 0xbe, 0xfb, 0x4c, 0xfb, 0xe7, 0x72, - 0x30, 0x76, 0x7d, 0x7d, 0xbd, 0x72, 0x8d, 0x44, 0xe2, 0xda, 0xee, 0x6d, 0x06, 0x80, 0x35, 0x6d, - 0x66, 0xb7, 0xc7, 0x5c, 0x3b, 0x72, 0x1b, 0xf3, 0x3c, 0x2a, 0xd2, 0x7c, 0xd9, 0x8b, 0x6e, 0x05, - 0xd5, 0x28, 0x70, 0xbd, 0xad, 0x54, 0xfd, 0xa7, 0x64, 0x2e, 0xf2, 0x59, 0xcc, 0x05, 0x7a, 0x0e, - 0x86, 0x58, 0x58, 0x26, 0x39, 0x09, 0x8f, 0xaa, 0xb7, 0x10, 0x2b, 0x3d, 0xd8, 0x9f, 0x2b, 0xde, - 0xc6, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x1b, 0x46, 0xb6, 0xa3, 0xa8, 0x75, 0x9d, 0x38, 0x75, - 0xfa, 0x74, 0xe7, 0xc7, 0xe1, 0x85, 0xb4, 0xe3, 0x90, 0x0e, 0x02, 0x47, 0x8b, 0x4f, 0x90, 0xb8, - 0x2c, 0xc4, 0x3a, 0x1d, 0xbb, 0x0a, 0x10, 0xc3, 0x8e, 0x48, 0x91, 0x63, 0xff, 0x81, 0x05, 0xc3, - 0x3c, 0x42, 0x46, 0x80, 0x5e, 0x85, 0x01, 0x72, 0x9f, 0xd4, 0x04, 0xc7, 0x9b, 0xda, 0xe1, 0x98, - 0xd3, 0xe2, 0x02, 0x69, 0xfa, 0x1f, 0xb3, 0x5a, 0xe8, 0x3a, 0x0c, 0xd3, 0xde, 0x5e, 0x53, 0xe1, - 0x42, 0x1e, 0xcb, 0xfa, 0x62, 0x35, 0xed, 0x9c, 0x39, 0x13, 0x45, 0x58, 0x56, 0x67, 0xda, 0xf3, - 0x5a, 0xab, 0x4a, 0x4f, 0xec, 0xa8, 0x1b, 0x63, 0xb1, 0xbe, 0x54, 0xe1, 0x48, 0x82, 0x1a, 0xd7, - 0x9e, 0xcb, 0x42, 0x1c, 0x13, 0xb1, 0xd7, 0xa1, 0x48, 0x27, 0x75, 0xa1, 0xe1, 0x3a, 0xdd, 0x0d, - 0x02, 0x9e, 0x84, 0xa2, 0x54, 0xf7, 0x87, 0xc2, 0x33, 0x9e, 0x51, 0x95, 0xd6, 0x00, 0x21, 0x8e, - 0xe1, 0xf6, 0x26, 0x9c, 0x62, 0xc6, 
0x9b, 0x4e, 0xb4, 0x6d, 0xec, 0xb1, 0xde, 0x8b, 0xf9, 0x29, - 0xf1, 0x80, 0xe4, 0x33, 0x33, 0xa3, 0x39, 0x9f, 0x8e, 0x4a, 0x8a, 0xf1, 0x63, 0xd2, 0xfe, 0xda, - 0x00, 0x3c, 0x5a, 0xae, 0x66, 0x07, 0x4f, 0x79, 0x09, 0x46, 0x39, 0x5f, 0x4a, 0x97, 0xb6, 0xd3, - 0x10, 0xed, 0x2a, 0x49, 0xf4, 0xba, 0x06, 0xc3, 0x06, 0x26, 0x3a, 0x0f, 0x79, 0xf7, 0x1d, 0x2f, - 0xe9, 0x9a, 0x55, 0x7e, 0x63, 0x0d, 0xd3, 0x72, 0x0a, 0xa6, 0x2c, 0x2e, 0xbf, 0x3b, 0x14, 0x58, - 0xb1, 0xb9, 0xaf, 0xc1, 0xb8, 0x1b, 0xd6, 0x42, 0xb7, 0xec, 0xd1, 0x73, 0x46, 0x3b, 0xa9, 0x94, - 0x70, 0x83, 0x76, 0x5a, 0x41, 0x71, 0x02, 0x5b, 0xbb, 0xc8, 0x06, 0xfb, 0x66, 0x93, 0x7b, 0xba, - 0x8a, 0xd3, 0x17, 0x40, 0x8b, 0x7d, 0x5d, 0xc8, 0x54, 0x0a, 0xe2, 0x05, 0xc0, 0x3f, 0x38, 0xc4, - 0x12, 0x46, 0x5f, 0x8e, 0xb5, 0x6d, 0xa7, 0xb5, 0xd0, 0x8e, 0xb6, 0x4b, 0x6e, 0x58, 0xf3, 0x77, - 0x49, 0xb0, 0xc7, 0x1e, 0xfd, 0x85, 0xf8, 0xe5, 0xa8, 0x00, 0x4b, 0xd7, 0x17, 0x2a, 0x14, 0x13, - 0x77, 0xd6, 0x41, 0x0b, 0x30, 0x21, 0x0b, 0xab, 0x24, 0x64, 0x57, 0xd8, 0x08, 0x23, 0xa3, 0x9c, - 0xa5, 0x44, 0xb1, 0x22, 0x92, 0xc4, 0x37, 0x39, 0x69, 0x38, 0x0a, 0x4e, 0xfa, 0x45, 0x18, 0x73, - 0x3d, 0x37, 0x72, 0x9d, 0xc8, 0xe7, 0xfa, 0x30, 0xfe, 0xbe, 0x67, 0x82, 0xfe, 0xb2, 0x0e, 0xc0, - 0x26, 0x9e, 0xfd, 0x5f, 0x06, 0x60, 0x8a, 0x4d, 0xdb, 0x07, 0x2b, 0xec, 0x9b, 0x69, 0x85, 0xdd, - 0xee, 0x5c, 0x61, 0x47, 0xf1, 0x44, 0x78, 0xe8, 0x65, 0xf6, 0x36, 0x14, 0x95, 0x7f, 0x98, 0x74, - 0x10, 0xb5, 0x32, 0x1c, 0x44, 0x7b, 0x73, 0x1f, 0xd2, 0xc4, 0x2e, 0x9f, 0x6a, 0x62, 0xf7, 0xd7, - 0x2d, 0x88, 0x15, 0x3c, 0xe8, 0x3a, 0x14, 0x5b, 0x3e, 0xb3, 0x1c, 0x0d, 0xa4, 0x39, 0xf6, 0xa3, - 0xa9, 0x17, 0x15, 0xbf, 0x14, 0xf9, 0xc7, 0x57, 0x64, 0x0d, 0x1c, 0x57, 0x46, 0x8b, 0x30, 0xdc, - 0x0a, 0x48, 0x35, 0x62, 0x31, 0x54, 0x7a, 0xd2, 0xe1, 0x6b, 0x84, 0xe3, 0x63, 0x59, 0xd1, 0xfe, - 0x79, 0x0b, 0x80, 0x5b, 0xb1, 0x39, 0xde, 0x16, 0x39, 0x01, 0xa9, 0x75, 0x09, 0x06, 0xc2, 0x16, - 0xa9, 0x75, 0xb3, 0xe9, 0x8d, 0xfb, 0x53, 0x6d, 0x91, 0x5a, 0x3c, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, - 0xed, 0xef, 0x06, 0x18, 0x8f, 0xd1, 0xca, 0x11, 0x69, 0xa2, 0xa7, 0x8d, 0x98, 0x0a, 0x67, 0x13, - 0x31, 0x15, 0x8a, 0x0c, 0x5b, 0x13, 0x90, 0xbe, 0x0d, 0xf9, 0xa6, 0x73, 0x5f, 0x48, 0xc0, 0x9e, - 0xec, 0xde, 0x0d, 0x4a, 0x7f, 0x7e, 0xd5, 0xb9, 0xcf, 0x1f, 0x89, 0x4f, 0xca, 0x05, 0xb2, 0xea, - 0xdc, 0x3f, 0xe0, 0x96, 0xbb, 0xec, 0x90, 0xba, 0xe9, 0x86, 0xd1, 0x17, 0xfe, 0x73, 0xfc, 0x9f, - 0x2d, 0x3b, 0xda, 0x08, 0x6b, 0xcb, 0xf5, 0x84, 0x81, 0x56, 0x5f, 0x6d, 0xb9, 0x5e, 0xb2, 0x2d, - 0xd7, 0xeb, 0xa3, 0x2d, 0xd7, 0x43, 0xef, 0xc2, 0xb0, 0xb0, 0x9f, 0x14, 0x31, 0x8c, 0xae, 0xf6, - 0xd1, 0x9e, 0x30, 0xbf, 0xe4, 0x6d, 0x5e, 0x95, 0x8f, 0x60, 0x51, 0xda, 0xb3, 0x5d, 0xd9, 0x20, - 0xfa, 0x2b, 0x16, 0x8c, 0x8b, 0xdf, 0x98, 0xbc, 0xd3, 0x26, 0x61, 0x24, 0x78, 0xcf, 0x8f, 0xf7, - 0xdf, 0x07, 0x51, 0x91, 0x77, 0xe5, 0xe3, 0xf2, 0x98, 0x35, 0x81, 0x3d, 0x7b, 0x94, 0xe8, 0x05, - 0xfa, 0x7b, 0x16, 0x9c, 0x6a, 0x3a, 0xf7, 0x79, 0x8b, 0xbc, 0x0c, 0x3b, 0x91, 0xeb, 0x0b, 0x3b, - 0x84, 0x57, 0xfb, 0x9b, 0xfe, 0x8e, 0xea, 0xbc, 0x93, 0x52, 0x59, 0x7a, 0x2a, 0x0d, 0xa5, 0x67, - 0x57, 0x53, 0xfb, 0x35, 0xbb, 0x09, 0x05, 0xb9, 0xde, 0x52, 0x44, 0x0d, 0x25, 0x9d, 0xb1, 0x3e, - 0xb4, 0xf9, 0xaa, 0x1e, 0xab, 0x80, 0xb6, 0x23, 0xd6, 0xda, 0xb1, 0xb6, 0xf3, 0x36, 0x8c, 0xea, - 0x6b, 0xec, 0x58, 0xdb, 0x7a, 0x07, 0xa6, 0x53, 0xd6, 0xd2, 0xb1, 0x36, 0x79, 0x0f, 0xce, 0x66, - 0xae, 0x8f, 0xe3, 0x6c, 0xd8, 0xfe, 0x39, 0x4b, 0x3f, 0x07, 0x4f, 0x40, 0x75, 0xb0, 0x64, 0xaa, - 0x0e, 0x2e, 0x74, 0xdf, 0x39, 0x19, 0xfa, 0x83, 0xb7, 0xf4, 
0x4e, 0xd3, 0x53, 0x1d, 0xbd, 0x0e, - 0x43, 0x0d, 0x5a, 0x22, 0xad, 0x70, 0xed, 0xde, 0x3b, 0x32, 0xe6, 0xa5, 0x58, 0x79, 0x88, 0x05, - 0x05, 0xfb, 0x97, 0x2c, 0x18, 0x38, 0x81, 0x91, 0xc0, 0xe6, 0x48, 0x3c, 0x9d, 0x49, 0x5a, 0x84, - 0x57, 0x9e, 0xc7, 0xce, 0xbd, 0xe5, 0xfb, 0x11, 0xf1, 0x42, 0xf6, 0x54, 0x4c, 0x1d, 0x98, 0xff, - 0x07, 0xa6, 0x6f, 0xfa, 0x4e, 0x7d, 0xd1, 0x69, 0x38, 0x5e, 0x8d, 0x04, 0x65, 0x6f, 0xeb, 0x50, - 0x16, 0xe4, 0xb9, 0x5e, 0x16, 0xe4, 0xf6, 0x36, 0x20, 0xbd, 0x01, 0xe1, 0x8a, 0x83, 0x61, 0xd8, - 0xe5, 0x4d, 0x89, 0xe1, 0x7f, 0x3c, 0x9d, 0x35, 0xeb, 0xe8, 0x99, 0xe6, 0x64, 0xc2, 0x0b, 0xb0, - 0x24, 0x64, 0xbf, 0x04, 0xa9, 0xfe, 0xfc, 0xbd, 0xc5, 0x06, 0xf6, 0xa7, 0x61, 0x8a, 0xd5, 0x3c, - 0xe4, 0x93, 0xd6, 0x4e, 0x48, 0x25, 0x53, 0x22, 0xfd, 0xd9, 0x5f, 0xb4, 0x60, 0x62, 0x2d, 0x11, - 0x00, 0xed, 0x32, 0xd3, 0x63, 0xa6, 0x08, 0xc3, 0xab, 0xac, 0x14, 0x0b, 0xe8, 0x91, 0xcb, 0xa0, - 0xfe, 0xc2, 0x82, 0x38, 0xc4, 0xc6, 0x09, 0x30, 0x5e, 0x4b, 0x06, 0xe3, 0x95, 0x2a, 0x1b, 0x51, - 0xdd, 0xc9, 0xe2, 0xbb, 0xd0, 0x0d, 0x15, 0x7c, 0xaa, 0x8b, 0x58, 0x24, 0x26, 0xc3, 0x43, 0x15, - 0x8d, 0x9b, 0x11, 0xaa, 0x64, 0x38, 0x2a, 0xfb, 0x3f, 0xe6, 0x00, 0x29, 0xdc, 0xbe, 0x83, 0x63, - 0x75, 0xd6, 0x38, 0x9a, 0xe0, 0x58, 0xbb, 0x80, 0x98, 0x26, 0x3e, 0x70, 0xbc, 0x90, 0x93, 0x75, - 0x85, 0xd4, 0xed, 0x70, 0x6a, 0xfe, 0x59, 0xd1, 0x24, 0xba, 0xd9, 0x41, 0x0d, 0xa7, 0xb4, 0xa0, - 0x59, 0x58, 0x0c, 0xf6, 0x6b, 0x61, 0x31, 0xd4, 0xc3, 0xdd, 0xee, 0x67, 0x2d, 0x18, 0x53, 0xc3, - 0xf4, 0x3e, 0x31, 0x86, 0x57, 0xfd, 0xc9, 0x38, 0xfa, 0x2a, 0x5a, 0x97, 0xd9, 0x95, 0xf0, 0xad, - 0xcc, 0x6d, 0xd2, 0x69, 0xb8, 0xef, 0x12, 0x15, 0x9a, 0x70, 0x4e, 0xb8, 0x41, 0x8a, 0xd2, 0x83, - 0xfd, 0xb9, 0x31, 0xf5, 0x8f, 0x87, 0x42, 0x8e, 0xab, 0xd8, 0x3f, 0x49, 0x37, 0xbb, 0xb9, 0x14, - 0xd1, 0x0b, 0x30, 0xd8, 0xda, 0x76, 0x42, 0x92, 0x70, 0x1a, 0x1a, 0xac, 0xd0, 0xc2, 0x83, 0xfd, - 0xb9, 0x71, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7f, 0xc8, 0xb1, 0xce, 0xc5, 0xd9, 0x33, 0xe4, - 0xd8, 0x9f, 0x58, 0x30, 0xb0, 0xe6, 0xd7, 0x4f, 0xe2, 0x08, 0x78, 0xcd, 0x38, 0x02, 0xce, 0x65, - 0x45, 0xa9, 0xcf, 0xdc, 0xfd, 0x2b, 0x89, 0xdd, 0x7f, 0x21, 0x93, 0x42, 0xf7, 0x8d, 0xdf, 0x84, - 0x11, 0x16, 0xfb, 0x5e, 0x38, 0x48, 0x3d, 0x67, 0x6c, 0xf8, 0xb9, 0xc4, 0x86, 0x9f, 0xd0, 0x50, - 0xb5, 0x9d, 0xfe, 0x04, 0x0c, 0x0b, 0x8f, 0x9b, 0xa4, 0xf7, 0xa9, 0xc0, 0xc5, 0x12, 0x6e, 0xff, - 0x78, 0x1e, 0x8c, 0x58, 0xfb, 0xe8, 0x57, 0x2c, 0x98, 0x0f, 0xb8, 0x25, 0x6e, 0xbd, 0xd4, 0x0e, - 0x5c, 0x6f, 0xab, 0x5a, 0xdb, 0x26, 0xf5, 0x76, 0xc3, 0xf5, 0xb6, 0xca, 0x5b, 0x9e, 0xaf, 0x8a, - 0x97, 0xef, 0x93, 0x5a, 0x9b, 0xa9, 0xaf, 0x7a, 0x04, 0xf6, 0x57, 0x16, 0xed, 0xcf, 0x3e, 0xd8, - 0x9f, 0x9b, 0xc7, 0x87, 0xa2, 0x8d, 0x0f, 0xd9, 0x17, 0xf4, 0x9b, 0x16, 0x5c, 0xe5, 0x21, 0xe8, - 0xfb, 0xef, 0x7f, 0x97, 0x77, 0x6e, 0x45, 0x92, 0x8a, 0x89, 0xac, 0x93, 0xa0, 0xb9, 0xf8, 0xa2, - 0x18, 0xd0, 0xab, 0x95, 0xc3, 0xb5, 0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x9f, 0xe6, 0x61, 0x4c, 0x84, - 0xa6, 0x12, 0x77, 0xc0, 0x0b, 0xc6, 0x92, 0x78, 0x2c, 0xb1, 0x24, 0xa6, 0x0c, 0xe4, 0xa3, 0x39, - 0xfe, 0x43, 0x98, 0xa2, 0x87, 0xf3, 0x75, 0xe2, 0x04, 0xd1, 0x06, 0x71, 0xb8, 0xe1, 0x54, 0xfe, - 0xd0, 0xa7, 0xbf, 0x12, 0xac, 0xdd, 0x4c, 0x12, 0xc3, 0x9d, 0xf4, 0xbf, 0x99, 0xee, 0x1c, 0x0f, - 0x26, 0x3b, 0xa2, 0x8b, 0xbd, 0x09, 0x45, 0xe5, 0x2e, 0x22, 0x0e, 0x9d, 0xee, 0x41, 0xfa, 0x92, - 0x14, 0xb8, 0xf0, 0x2b, 0x76, 0x55, 0x8a, 0xc9, 0xd9, 0x7f, 0x3f, 0x67, 0x34, 0xc8, 0x27, 0x71, - 0x0d, 0x0a, 0x4e, 0x18, 0xba, 0x5b, 0x1e, 0xa9, 0x8b, 0x1d, 0xfb, 0xe1, 0xac, 0x1d, 
0x6b, 0x34, - 0xc3, 0x5c, 0x76, 0x16, 0x44, 0x4d, 0xac, 0x68, 0xa0, 0xeb, 0xdc, 0x3c, 0x6d, 0x57, 0xbe, 0xd4, - 0xfa, 0xa3, 0x06, 0xd2, 0x80, 0x6d, 0x97, 0x60, 0x51, 0x1f, 0x7d, 0x86, 0xdb, 0x0f, 0xde, 0xf0, - 0xfc, 0x7b, 0xde, 0x35, 0xdf, 0x97, 0xe1, 0x1f, 0xfa, 0x23, 0x38, 0x25, 0xad, 0x06, 0x55, 0x75, - 0x6c, 0x52, 0xeb, 0x2f, 0x5c, 0xe7, 0xe7, 0x61, 0x9a, 0x92, 0x36, 0xbd, 0xb3, 0x43, 0x44, 0x60, - 0x42, 0xc4, 0x3d, 0x93, 0x65, 0x62, 0xec, 0x52, 0x1f, 0x61, 0x66, 0xed, 0x58, 0x02, 0x7c, 0xc3, - 0x24, 0x81, 0x93, 0x34, 0xed, 0x9f, 0xb2, 0x80, 0x79, 0xaa, 0x9e, 0x00, 0x3f, 0xf2, 0x09, 0x93, - 0x1f, 0x99, 0xc9, 0x1a, 0xe4, 0x0c, 0x56, 0xe4, 0x79, 0xbe, 0xb2, 0x2a, 0x81, 0x7f, 0x7f, 0x4f, - 0x18, 0x7d, 0xf4, 0x7e, 0x7f, 0xd8, 0xff, 0xdb, 0xe2, 0x87, 0x98, 0x72, 0xe6, 0x40, 0xdf, 0x0e, - 0x85, 0x9a, 0xd3, 0x72, 0x6a, 0x3c, 0x31, 0x4c, 0xa6, 0x2c, 0xce, 0xa8, 0x34, 0xbf, 0x24, 0x6a, - 0x70, 0xd9, 0x92, 0x8c, 0x9f, 0x57, 0x90, 0xc5, 0x3d, 0xe5, 0x49, 0xaa, 0xc9, 0xd9, 0x1d, 0x18, - 0x33, 0x88, 0x1d, 0xab, 0x20, 0xe2, 0xdb, 0xf9, 0x15, 0xab, 0xe2, 0x3d, 0x36, 0x61, 0xca, 0xd3, - 0xfe, 0xd3, 0x0b, 0x45, 0x3e, 0x2e, 0x3f, 0xdc, 0xeb, 0x12, 0x65, 0xb7, 0x8f, 0xe6, 0x04, 0x9b, - 0x20, 0x83, 0x3b, 0x29, 0xdb, 0x3f, 0x61, 0xc1, 0x23, 0x3a, 0xa2, 0xe6, 0x67, 0xd3, 0x4b, 0xba, - 0x5f, 0x82, 0x82, 0xdf, 0x22, 0x81, 0x13, 0xf9, 0x81, 0xb8, 0x35, 0xae, 0xc8, 0x41, 0xbf, 0x25, - 0xca, 0x0f, 0x44, 0x58, 0x75, 0x49, 0x5d, 0x96, 0x63, 0x55, 0x93, 0xbe, 0x3e, 0xd9, 0x60, 0x84, - 0xc2, 0xa3, 0x8a, 0x9d, 0x01, 0x4c, 0xd1, 0x1d, 0x62, 0x01, 0xb1, 0xbf, 0x66, 0xf1, 0x85, 0xa5, - 0x77, 0x1d, 0xbd, 0x03, 0x93, 0x4d, 0x27, 0xaa, 0x6d, 0x2f, 0xdf, 0x6f, 0x05, 0x5c, 0x57, 0x22, - 0xc7, 0xe9, 0xc9, 0x5e, 0xe3, 0xa4, 0x7d, 0x64, 0x6c, 0x12, 0xb9, 0x9a, 0x20, 0x86, 0x3b, 0xc8, - 0xa3, 0x0d, 0x18, 0x61, 0x65, 0xcc, 0x59, 0x30, 0xec, 0xc6, 0x1a, 0x64, 0xb5, 0xa6, 0x6c, 0x05, - 0x56, 0x63, 0x3a, 0x58, 0x27, 0x6a, 0xff, 0x4c, 0x9e, 0xef, 0x76, 0xc6, 0xca, 0x3f, 0x01, 0xc3, - 0x2d, 0xbf, 0xbe, 0x54, 0x2e, 0x61, 0x31, 0x0b, 0xea, 0x1a, 0xa9, 0xf0, 0x62, 0x2c, 0xe1, 0xe8, - 0x0a, 0x14, 0xc4, 0x4f, 0xa9, 0xdb, 0x62, 0x67, 0xb3, 0xc0, 0x0b, 0xb1, 0x82, 0xa2, 0x67, 0x01, - 0x5a, 0x81, 0xbf, 0xeb, 0xd6, 0x59, 0x10, 0x8b, 0xbc, 0x69, 0xe6, 0x53, 0x51, 0x10, 0xac, 0x61, - 0xa1, 0x57, 0x60, 0xac, 0xed, 0x85, 0x9c, 0x1d, 0xd1, 0x42, 0xd6, 0x2a, 0x03, 0x94, 0xdb, 0x3a, - 0x10, 0x9b, 0xb8, 0x68, 0x01, 0x86, 0x22, 0x87, 0x99, 0xad, 0x0c, 0x66, 0x9b, 0xcd, 0xae, 0x53, - 0x0c, 0x3d, 0x07, 0x09, 0xad, 0x80, 0x45, 0x45, 0xf4, 0xa6, 0xf4, 0xdb, 0xe5, 0x07, 0xbb, 0xb0, - 0x57, 0xef, 0xef, 0x12, 0xd0, 0xbc, 0x76, 0x85, 0x1d, 0xbc, 0x41, 0x0b, 0xbd, 0x0c, 0x40, 0xee, - 0x47, 0x24, 0xf0, 0x9c, 0x86, 0xb2, 0x0a, 0x53, 0x7c, 0x41, 0xc9, 0x5f, 0xf3, 0xa3, 0xdb, 0x21, - 0x59, 0x56, 0x18, 0x58, 0xc3, 0xb6, 0x7f, 0xb3, 0x08, 0x10, 0xf3, 0xed, 0xe8, 0xdd, 0x8e, 0x83, - 0xeb, 0xa9, 0xee, 0x9c, 0xfe, 0xd1, 0x9d, 0x5a, 0xe8, 0x7b, 0x2c, 0x18, 0x71, 0x1a, 0x0d, 0xbf, - 0xe6, 0xf0, 0xa0, 0xc2, 0xb9, 0xee, 0x07, 0xa7, 0x68, 0x7f, 0x21, 0xae, 0xc1, 0xbb, 0xf0, 0x9c, - 0x5c, 0xa1, 0x1a, 0xa4, 0x67, 0x2f, 0xf4, 0x86, 0xd1, 0xc7, 0xe4, 0x53, 0x31, 0x6f, 0x0c, 0xa5, - 0x7a, 0x2a, 0x16, 0xd9, 0x1d, 0xa1, 0xbf, 0x12, 0x6f, 0x1b, 0xaf, 0xc4, 0x81, 0x6c, 0xc7, 0x44, - 0x83, 0x7d, 0xed, 0xf5, 0x40, 0x44, 0x15, 0x3d, 0x48, 0xc1, 0x60, 0xb6, 0x17, 0xa0, 0xf6, 0x4e, - 0xea, 0x11, 0xa0, 0xe0, 0x6d, 0x98, 0xa8, 0x9b, 0x4c, 0x80, 0x58, 0x89, 0x8f, 0x67, 0xd1, 0x4d, - 0xf0, 0x0c, 0xf1, 0xb5, 0x9f, 0x00, 0xe0, 0x24, 0x61, 0x54, 0xe1, 0x31, 0x2b, 0xca, 0xde, 0xa6, - 0x2f, 0x7c, 
0x26, 0xec, 0xcc, 0xb9, 0xdc, 0x0b, 0x23, 0xd2, 0xa4, 0x98, 0xf1, 0xed, 0xbe, 0x26, - 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x43, 0xcc, 0x0d, 0x2c, 0x9c, 0x29, 0x64, 0xcb, 0x8a, 0xcd, - 0x20, 0x6c, 0xf1, 0x86, 0x64, 0x7f, 0x43, 0x2c, 0x28, 0xa0, 0xeb, 0xd2, 0xc9, 0x32, 0x2c, 0x7b, - 0xb7, 0x43, 0xc2, 0x9c, 0x2c, 0x8b, 0x8b, 0x1f, 0x8e, 0xfd, 0x27, 0x79, 0x79, 0x6a, 0xa6, 0x32, - 0xa3, 0x26, 0xe5, 0xa2, 0xc4, 0x7f, 0x99, 0x00, 0x6d, 0x06, 0xb2, 0xbb, 0x67, 0x26, 0x49, 0x8b, - 0x87, 0xf3, 0x8e, 0x49, 0x02, 0x27, 0x69, 0x52, 0x8e, 0x94, 0xef, 0x7a, 0xe1, 0x75, 0xd1, 0xeb, - 0xec, 0xe0, 0x0f, 0x71, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea, 0x9f, 0x28, 0x7b, 0x30, 0xeb, 0xc1, - 0x64, 0x72, 0x8b, 0x1e, 0x2b, 0x3b, 0xf2, 0x07, 0x03, 0x30, 0x6e, 0x2e, 0x29, 0x74, 0x15, 0x8a, - 0x82, 0x88, 0x4a, 0x5a, 0xa0, 0x76, 0xc9, 0xaa, 0x04, 0xe0, 0x18, 0x87, 0xe5, 0xaa, 0x60, 0xd5, - 0x35, 0x33, 0xdb, 0x38, 0x57, 0x85, 0x82, 0x60, 0x0d, 0x8b, 0x3e, 0xac, 0x36, 0x7c, 0x3f, 0x52, - 0x17, 0x92, 0x5a, 0x77, 0x8b, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x88, 0x76, 0x48, 0xe0, 0x91, 0x86, - 0x19, 0xde, 0x58, 0x5d, 0x44, 0x37, 0x74, 0x20, 0x36, 0x71, 0xe9, 0x75, 0xea, 0x87, 0x6c, 0x21, - 0x8b, 0xe7, 0x5b, 0x6c, 0xb6, 0x5c, 0xe5, 0x7e, 0xde, 0x12, 0x8e, 0x3e, 0x0d, 0x8f, 0xa8, 0x10, - 0x4e, 0x98, 0xeb, 0x21, 0x64, 0x8b, 0x43, 0x86, 0xb4, 0xe5, 0x91, 0xa5, 0x74, 0x34, 0x9c, 0x55, - 0x1f, 0xbd, 0x06, 0xe3, 0x82, 0xc5, 0x97, 0x14, 0x87, 0x4d, 0xd3, 0x98, 0x1b, 0x06, 0x14, 0x27, - 0xb0, 0x65, 0x80, 0x66, 0xc6, 0x65, 0x4b, 0x0a, 0x85, 0xce, 0x00, 0xcd, 0x3a, 0x1c, 0x77, 0xd4, - 0x40, 0x0b, 0x30, 0xc1, 0x79, 0x30, 0xd7, 0xdb, 0xe2, 0x73, 0x22, 0x9c, 0xa2, 0xd4, 0x96, 0xba, - 0x65, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x04, 0xa3, 0x4e, 0x50, 0xdb, 0x76, 0x23, 0x52, 0x8b, 0xda, - 0x01, 0xf7, 0x96, 0xd2, 0x6c, 0x8b, 0x16, 0x34, 0x18, 0x36, 0x30, 0xed, 0x77, 0x61, 0x3a, 0x25, - 0x00, 0x04, 0x5d, 0x38, 0x4e, 0xcb, 0x95, 0xdf, 0x94, 0x30, 0x40, 0x5e, 0xa8, 0x94, 0xe5, 0xd7, - 0x68, 0x58, 0x74, 0x75, 0xb2, 0x40, 0x11, 0x5a, 0xbe, 0x43, 0xb5, 0x3a, 0x57, 0x24, 0x00, 0xc7, - 0x38, 0xf6, 0xff, 0xc8, 0xc1, 0x44, 0x8a, 0x6e, 0x85, 0xe5, 0xdc, 0x4b, 0x3c, 0x52, 0xe2, 0x14, - 0x7b, 0x66, 0xbc, 0xef, 0xdc, 0x21, 0xe2, 0x7d, 0xe7, 0x7b, 0xc5, 0xfb, 0x1e, 0x78, 0x2f, 0xf1, - 0xbe, 0xcd, 0x11, 0x1b, 0xec, 0x6b, 0xc4, 0x52, 0x62, 0x84, 0x0f, 0x1d, 0x32, 0x46, 0xb8, 0x31, - 0xe8, 0xc3, 0x7d, 0x0c, 0xfa, 0x0f, 0xe7, 0x60, 0x32, 0x69, 0x03, 0x79, 0x02, 0x72, 0xdb, 0xd7, - 0x0d, 0xb9, 0xed, 0x95, 0x7e, 0x5c, 0x5e, 0x33, 0x65, 0xb8, 0x38, 0x21, 0xc3, 0xfd, 0x68, 0x5f, - 0xd4, 0xba, 0xcb, 0x73, 0xff, 0x66, 0x0e, 0x4e, 0xa7, 0xfa, 0xdc, 0x9e, 0xc0, 0xd8, 0xdc, 0x32, - 0xc6, 0xe6, 0xe9, 0xbe, 0xdd, 0x81, 0x33, 0x07, 0xe8, 0x6e, 0x62, 0x80, 0xae, 0xf6, 0x4f, 0xb2, - 0xfb, 0x28, 0x7d, 0x35, 0x0f, 0x17, 0x52, 0xeb, 0xc5, 0x62, 0xcf, 0x15, 0x43, 0xec, 0xf9, 0x6c, - 0x42, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x34, 0x72, 0x50, 0xe1, 0xe8, 0xca, 0xa2, 0x19, 0x3c, 0xa4, - 0x0c, 0xd4, 0x70, 0x74, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x9b, 0x49, 0xf6, 0xf9, 0xaf, 0x2d, 0x38, - 0x9b, 0x3a, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x33, 0x65, 0x5d, 0x4f, 0xf4, 0xbd, 0x5a, 0x33, 0x84, - 0x5f, 0xbf, 0x3e, 0x90, 0xf1, 0x2d, 0xec, 0x25, 0x7f, 0x0b, 0x46, 0x9c, 0x5a, 0x8d, 0x84, 0xe1, - 0xaa, 0x5f, 0x57, 0x21, 0x8d, 0x9f, 0x66, 0xef, 0xac, 0xb8, 0xf8, 0x60, 0x7f, 0x6e, 0x36, 0x49, - 0x22, 0x06, 0x63, 0x9d, 0x02, 0xfa, 0x0c, 0x14, 0x42, 0x71, 0x6f, 0x8a, 0xb9, 0x7f, 0xae, 0xcf, - 0xc1, 0x71, 0x36, 0x48, 0xc3, 0x8c, 0xb9, 0xa4, 0x24, 0x15, 0x8a, 0xa4, 0x19, 0x9f, 0x25, 0x77, - 0xa4, 0xf1, 0x59, 0x9e, 0x05, 0xd8, 
0x55, 0x8f, 0x81, 0xa4, 0xfc, 0x41, 0x7b, 0x26, 0x68, 0x58, - 0xe8, 0x93, 0x30, 0x19, 0xf2, 0xa0, 0x84, 0x4b, 0x0d, 0x27, 0x64, 0x6e, 0x2e, 0x62, 0x15, 0xb2, - 0xb8, 0x4e, 0xd5, 0x04, 0x0c, 0x77, 0x60, 0xa3, 0x15, 0xd9, 0x2a, 0x8b, 0xa0, 0xc8, 0x17, 0xe6, - 0xe5, 0xb8, 0x45, 0x91, 0xf1, 0xf7, 0x54, 0x72, 0xf8, 0xd9, 0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x00, - 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xec, 0xc3, 0x93, 0x9e, 0x2a, 0xf5, 0x54, 0xab, 0x5c, 0xe6, - 0x9b, 0x5a, 0x52, 0x44, 0xb0, 0x46, 0xd0, 0xfe, 0xe1, 0x01, 0x78, 0xb4, 0xcb, 0x19, 0x89, 0x16, - 0x4c, 0x3d, 0xec, 0x93, 0xc9, 0xc7, 0xf5, 0x6c, 0x6a, 0x65, 0xe3, 0xb5, 0x9d, 0x58, 0x8a, 0xb9, - 0xf7, 0xbc, 0x14, 0x7f, 0xc0, 0xd2, 0xc4, 0x1e, 0xdc, 0x56, 0xf3, 0x13, 0x87, 0x3c, 0xfb, 0x8f, - 0x50, 0x0e, 0xb2, 0x99, 0x22, 0x4c, 0x78, 0xb6, 0xef, 0xee, 0xf4, 0x2d, 0x5d, 0x38, 0x59, 0x29, - 0xf1, 0x6f, 0x5b, 0x70, 0xbe, 0x6b, 0x70, 0x8e, 0x6f, 0x40, 0x86, 0xc1, 0xfe, 0x82, 0x05, 0x8f, - 0xa5, 0xd6, 0x30, 0xcc, 0x8c, 0xae, 0x42, 0xb1, 0x46, 0x0b, 0x35, 0xff, 0xca, 0xd8, 0xf1, 0x5c, - 0x02, 0x70, 0x8c, 0x73, 0xc8, 0xc0, 0x23, 0xbf, 0x6a, 0x41, 0xc7, 0xa6, 0x3f, 0x81, 0xdb, 0xa7, - 0x6c, 0xde, 0x3e, 0x1f, 0xee, 0x67, 0x34, 0x33, 0x2e, 0x9e, 0x3f, 0x9e, 0x80, 0x33, 0x19, 0xfe, - 0x45, 0xbb, 0x30, 0xb5, 0x55, 0x23, 0xa6, 0xe7, 0x6a, 0xb7, 0xf8, 0x2f, 0x5d, 0xdd, 0x5c, 0x59, - 0x4e, 0xd2, 0xa9, 0x0e, 0x14, 0xdc, 0xd9, 0x04, 0xfa, 0x82, 0x05, 0xa7, 0x9c, 0x7b, 0xe1, 0x32, - 0xe5, 0x22, 0xdc, 0xda, 0x62, 0xc3, 0xaf, 0xed, 0xd0, 0x23, 0x5a, 0x6e, 0x84, 0xe7, 0x53, 0x25, - 0x3b, 0x77, 0xab, 0x1d, 0xf8, 0x46, 0xf3, 0x2c, 0x49, 0x6b, 0x1a, 0x16, 0x4e, 0x6d, 0x0b, 0x61, - 0x11, 0xb9, 0x9f, 0xbe, 0x51, 0xba, 0xf8, 0x56, 0xa7, 0x39, 0x82, 0xf1, 0x6b, 0x51, 0x42, 0xb0, - 0xa2, 0x83, 0x3e, 0x07, 0xc5, 0x2d, 0xe9, 0x9d, 0x99, 0x72, 0xed, 0xc6, 0x03, 0xd9, 0xdd, 0x67, - 0x95, 0xab, 0x67, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0xd7, 0x20, 0xef, 0x6d, 0x86, 0xdd, 0xf2, 0x9c, - 0x26, 0xec, 0xf0, 0x78, 0x04, 0x83, 0xb5, 0x95, 0x2a, 0xa6, 0x15, 0xd1, 0x75, 0xc8, 0x07, 0x1b, - 0x75, 0x21, 0x96, 0x4c, 0xdd, 0xa4, 0x78, 0xb1, 0x94, 0xd1, 0x2b, 0x46, 0x09, 0x2f, 0x96, 0x30, - 0x25, 0x81, 0x2a, 0x30, 0xc8, 0x9c, 0x72, 0xc4, 0x25, 0x97, 0xca, 0xce, 0x77, 0x71, 0x6e, 0xe3, - 0x61, 0x0e, 0x18, 0x02, 0xe6, 0x84, 0xd0, 0x3a, 0x0c, 0xd5, 0x58, 0x4e, 0x4c, 0x11, 0xf1, 0xed, - 0x63, 0xa9, 0x02, 0xc8, 0x2e, 0xc9, 0x42, 0x85, 0x3c, 0x8e, 0x61, 0x60, 0x41, 0x8b, 0x51, 0x25, - 0xad, 0xed, 0xcd, 0x50, 0xe4, 0x70, 0x4e, 0xa7, 0xda, 0x25, 0x07, 0xae, 0xa0, 0xca, 0x30, 0xb0, - 0xa0, 0x85, 0x5e, 0x86, 0xdc, 0x66, 0x4d, 0x38, 0xdc, 0xa4, 0x4a, 0x22, 0xcd, 0x20, 0x14, 0x8b, - 0x43, 0x0f, 0xf6, 0xe7, 0x72, 0x2b, 0x4b, 0x38, 0xb7, 0x59, 0x43, 0x6b, 0x30, 0xbc, 0xc9, 0xdd, - 0xd6, 0x85, 0xb0, 0xf1, 0xf1, 0x74, 0x8f, 0xfa, 0x0e, 0xcf, 0x76, 0xee, 0x6b, 0x22, 0x00, 0x58, - 0x12, 0x61, 0x81, 0xf0, 0x95, 0xfb, 0xbd, 0x08, 0x8e, 0x36, 0x7f, 0xb8, 0x90, 0x09, 0x9c, 0xe9, - 0x88, 0x9d, 0xf8, 0xb1, 0x46, 0x91, 0xae, 0x6a, 0x47, 0x26, 0xd2, 0x17, 0x61, 0x62, 0x52, 0x57, - 0xb5, 0xca, 0xb6, 0xdf, 0x6d, 0x55, 0x2b, 0x24, 0x1c, 0x13, 0x45, 0x3b, 0x30, 0xb6, 0x1b, 0xb6, - 0xb6, 0x89, 0xdc, 0xd2, 0x2c, 0x6a, 0x4c, 0xc6, 0xbd, 0x7c, 0x47, 0x20, 0xba, 0x41, 0xd4, 0x76, - 0x1a, 0x1d, 0xa7, 0x10, 0xd3, 0xe9, 0xdf, 0xd1, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, 0x77, 0xda, - 0xfe, 0xc6, 0x5e, 0x44, 0x44, 0x4c, 0xb3, 0xd4, 0xe1, 0x7f, 0x83, 0xa3, 0x74, 0x0e, 0xbf, 0x00, - 0x60, 0x49, 0x04, 0xdd, 0x11, 0xc3, 0xc3, 0x4e, 0xcf, 0xc9, 0xec, 0xc0, 0xa3, 0x0b, 0x12, 0x29, - 0x63, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x6b, 
0xdb, 0x8f, 0x7c, 0x2f, 0x71, 0x42, - 0x4f, 0x65, 0x9f, 0x92, 0x95, 0x14, 0xfc, 0xce, 0x53, 0x32, 0x0d, 0x0b, 0xa7, 0xb6, 0x85, 0xea, - 0x30, 0xde, 0xf2, 0x83, 0xe8, 0x9e, 0x1f, 0xc8, 0xf5, 0x85, 0xba, 0x08, 0x4b, 0x0c, 0x4c, 0xd1, - 0x22, 0x0b, 0x17, 0x68, 0x42, 0x70, 0x82, 0x26, 0xfa, 0x14, 0x0c, 0x87, 0x35, 0xa7, 0x41, 0xca, - 0xb7, 0x66, 0xa6, 0xb3, 0xaf, 0x9f, 0x2a, 0x47, 0xc9, 0x58, 0x5d, 0x3c, 0x6a, 0x3e, 0x47, 0xc1, - 0x92, 0x1c, 0x5a, 0x81, 0x41, 0x96, 0xe8, 0x8c, 0x05, 0xe0, 0xcb, 0x88, 0x9f, 0xda, 0x61, 0x15, - 0xcd, 0xcf, 0x26, 0x56, 0x8c, 0x79, 0x75, 0xba, 0x07, 0xc4, 0x9b, 0xc1, 0x0f, 0x67, 0x4e, 0x67, - 0xef, 0x01, 0xf1, 0xd4, 0xb8, 0x55, 0xed, 0xb6, 0x07, 0x14, 0x12, 0x8e, 0x89, 0xd2, 0x93, 0x99, - 0x9e, 0xa6, 0x67, 0xba, 0x98, 0xf3, 0x64, 0x9e, 0xa5, 0xec, 0x64, 0xa6, 0x27, 0x29, 0x25, 0x61, - 0xff, 0xde, 0x70, 0x27, 0xcf, 0xc2, 0x5e, 0x99, 0xdf, 0x65, 0x75, 0x28, 0x20, 0x3f, 0xde, 0xaf, - 0xd0, 0xeb, 0x08, 0x59, 0xf0, 0x2f, 0x58, 0x70, 0xa6, 0x95, 0xfa, 0x21, 0x82, 0x01, 0xe8, 0x4f, - 0x76, 0xc6, 0x3f, 0x5d, 0x05, 0x6b, 0x4c, 0x87, 0xe3, 0x8c, 0x96, 0x92, 0xcf, 0x9c, 0xfc, 0x7b, - 0x7e, 0xe6, 0xac, 0x42, 0x81, 0x31, 0x99, 0x3d, 0x72, 0x44, 0x27, 0x5f, 0x7b, 0x8c, 0x95, 0x58, - 0x12, 0x15, 0xb1, 0x22, 0x81, 0x7e, 0xd0, 0x82, 0xf3, 0xc9, 0xae, 0x63, 0xc2, 0xc0, 0x22, 0xc2, - 0x23, 0x7f, 0xe0, 0xae, 0x88, 0xef, 0xef, 0xe0, 0xff, 0x0d, 0xe4, 0x83, 0x5e, 0x08, 0xb8, 0x7b, - 0x63, 0xa8, 0x94, 0xf2, 0xc2, 0x1e, 0x32, 0xb5, 0x0a, 0x7d, 0xbc, 0xb2, 0x9f, 0x87, 0xd1, 0xa6, - 0xdf, 0xf6, 0x22, 0x61, 0xfd, 0x23, 0x2c, 0x11, 0x98, 0x06, 0x7e, 0x55, 0x2b, 0xc7, 0x06, 0x56, - 0xe2, 0x6d, 0x5e, 0x78, 0xe8, 0xb7, 0xf9, 0x5b, 0x30, 0xea, 0x69, 0xe6, 0xaa, 0x82, 0x1f, 0xb8, - 0x9c, 0x1d, 0x9d, 0x55, 0x37, 0x6e, 0xe5, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xed, 0x64, 0x1f, 0x7c, - 0x5f, 0xb6, 0x52, 0x98, 0x7a, 0x2e, 0x02, 0x78, 0xd5, 0x14, 0x01, 0x5c, 0x4e, 0x8a, 0x00, 0x3a, - 0x24, 0xca, 0xc6, 0xeb, 0xbf, 0xff, 0xe4, 0x33, 0xfd, 0x86, 0x30, 0xb4, 0x1b, 0x70, 0xb1, 0xd7, - 0xb5, 0xc4, 0xcc, 0xc0, 0xea, 0x4a, 0x7f, 0x18, 0x9b, 0x81, 0xd5, 0xcb, 0x25, 0xcc, 0x20, 0xfd, - 0x06, 0xc7, 0xb1, 0xff, 0x9b, 0x05, 0xf9, 0x8a, 0x5f, 0x3f, 0x81, 0x07, 0xef, 0x27, 0x8c, 0x07, - 0xef, 0xa3, 0xe9, 0x17, 0x62, 0x3d, 0x53, 0x1e, 0xbe, 0x9c, 0x90, 0x87, 0x9f, 0xcf, 0x22, 0xd0, - 0x5d, 0xfa, 0xfd, 0x93, 0x79, 0x18, 0xa9, 0xf8, 0x75, 0x65, 0x83, 0xfd, 0xeb, 0x0f, 0x63, 0x83, - 0x9d, 0x99, 0x42, 0x41, 0xa3, 0xcc, 0xac, 0xc7, 0xa4, 0xe3, 0xe8, 0x37, 0x98, 0x29, 0xf6, 0x5d, - 0xe2, 0x6e, 0x6d, 0x47, 0xa4, 0x9e, 0xfc, 0x9c, 0x93, 0x33, 0xc5, 0xfe, 0xaf, 0x16, 0x4c, 0x24, - 0x5a, 0x47, 0x0d, 0x18, 0x6b, 0xe8, 0xd2, 0x56, 0xb1, 0x4e, 0x1f, 0x4a, 0x50, 0x2b, 0x4c, 0x59, - 0xb5, 0x22, 0x6c, 0x12, 0x47, 0xf3, 0x00, 0x4a, 0xfd, 0x28, 0xc5, 0x7a, 0x8c, 0xeb, 0x57, 0xfa, - 0xc9, 0x10, 0x6b, 0x18, 0xe8, 0x05, 0x18, 0x89, 0xfc, 0x96, 0xdf, 0xf0, 0xb7, 0xf6, 0x6e, 0x10, - 0x19, 0x8e, 0x49, 0x19, 0xa8, 0xad, 0xc7, 0x20, 0xac, 0xe3, 0xd9, 0x3f, 0x9d, 0xe7, 0x1f, 0xea, - 0x45, 0xee, 0x07, 0x6b, 0xf2, 0xfd, 0xbd, 0x26, 0xbf, 0x6a, 0xc1, 0x24, 0x6d, 0x9d, 0xd9, 0xc0, - 0xc8, 0xcb, 0x56, 0x45, 0x65, 0xb6, 0xba, 0x44, 0x65, 0xbe, 0x4c, 0xcf, 0xae, 0xba, 0xdf, 0x8e, - 0x84, 0x04, 0x4d, 0x3b, 0x9c, 0x68, 0x29, 0x16, 0x50, 0x81, 0x47, 0x82, 0x40, 0xf8, 0xed, 0xe9, - 0x78, 0x24, 0x08, 0xb0, 0x80, 0xca, 0xa0, 0xcd, 0x03, 0xe9, 0x41, 0x9b, 0x79, 0x70, 0x49, 0x61, - 0x2d, 0x21, 0xd8, 0x1e, 0x2d, 0xb8, 0xa4, 0x34, 0xa3, 0x88, 0x71, 0xec, 0x9f, 0xcb, 0xc3, 0x68, - 0xc5, 0xaf, 0xc7, 0x0a, 0xc0, 0xe7, 0x0d, 0x05, 0xe0, 0xc5, 0x84, 0x02, 0x70, 0x52, 
0xc7, 0xfd, - 0x40, 0xdd, 0xf7, 0xf5, 0x52, 0xf7, 0xfd, 0x13, 0x8b, 0xcd, 0x5a, 0x69, 0xad, 0xca, 0x4d, 0xaa, - 0xd0, 0x33, 0x30, 0xc2, 0x0e, 0x24, 0xe6, 0x28, 0x2a, 0xb5, 0x62, 0x2c, 0x19, 0xd1, 0x5a, 0x5c, - 0x8c, 0x75, 0x1c, 0x74, 0x05, 0x0a, 0x21, 0x71, 0x82, 0xda, 0xb6, 0x3a, 0xe3, 0x84, 0x0a, 0x8b, - 0x97, 0x61, 0x05, 0x45, 0x6f, 0xc4, 0x71, 0x0d, 0xf3, 0xd9, 0x8e, 0x67, 0x7a, 0x7f, 0xf8, 0x16, - 0xc9, 0x0e, 0x66, 0x68, 0xdf, 0x05, 0xd4, 0x89, 0xdf, 0x47, 0x40, 0xaf, 0x39, 0x33, 0xa0, 0x57, - 0xb1, 0x23, 0x98, 0xd7, 0x9f, 0x5b, 0x30, 0x5e, 0xf1, 0xeb, 0x74, 0xeb, 0x7e, 0x33, 0xed, 0x53, - 0x3d, 0xa8, 0xeb, 0x50, 0x97, 0xa0, 0xae, 0x97, 0x60, 0xb0, 0xe2, 0xd7, 0xcb, 0x95, 0x6e, 0x0e, - 0xdb, 0xf6, 0xdf, 0xb2, 0x60, 0xb8, 0xe2, 0xd7, 0x4f, 0x40, 0x38, 0xff, 0xaa, 0x29, 0x9c, 0x7f, - 0x24, 0x63, 0xdd, 0x64, 0xc8, 0xe3, 0xff, 0xc6, 0x00, 0x8c, 0xd1, 0x7e, 0xfa, 0x5b, 0x72, 0x2a, - 0x8d, 0x61, 0xb3, 0xfa, 0x18, 0x36, 0xca, 0x0b, 0xfb, 0x8d, 0x86, 0x7f, 0x2f, 0x39, 0xad, 0x2b, - 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x0a, 0x0a, 0xad, 0x80, 0xec, 0xba, 0xbe, 0x60, 0x32, 0x35, 0x55, - 0x47, 0x45, 0x94, 0x63, 0x85, 0x41, 0x1f, 0x67, 0xa1, 0xeb, 0xd5, 0x48, 0x95, 0xd4, 0x7c, 0xaf, - 0xce, 0xe5, 0xd7, 0x79, 0x91, 0x98, 0x41, 0x2b, 0xc7, 0x06, 0x16, 0xba, 0x0b, 0x45, 0xf6, 0x9f, - 0x1d, 0x3b, 0x87, 0x4f, 0xf1, 0x29, 0x52, 0xbe, 0x09, 0x02, 0x38, 0xa6, 0x85, 0x9e, 0x05, 0x88, - 0x64, 0xf4, 0xee, 0x50, 0x04, 0x6f, 0x52, 0x0c, 0xb9, 0x8a, 0xeb, 0x1d, 0x62, 0x0d, 0x0b, 0x3d, - 0x09, 0xc5, 0xc8, 0x71, 0x1b, 0x37, 0x5d, 0x8f, 0x84, 0x4c, 0x2e, 0x9d, 0x97, 0x99, 0xd7, 0x44, - 0x21, 0x8e, 0xe1, 0x94, 0x21, 0x62, 0x91, 0x0d, 0x78, 0x82, 0xe0, 0x02, 0xc3, 0x66, 0x0c, 0xd1, - 0x4d, 0x55, 0x8a, 0x35, 0x0c, 0xb4, 0x0d, 0xe7, 0x5c, 0x8f, 0x25, 0x31, 0x20, 0xd5, 0x1d, 0xb7, - 0xb5, 0x7e, 0xb3, 0x7a, 0x87, 0x04, 0xee, 0xe6, 0xde, 0xa2, 0x53, 0xdb, 0x21, 0x9e, 0x4c, 0xde, - 0xf8, 0x61, 0xd1, 0xc5, 0x73, 0xe5, 0x2e, 0xb8, 0xb8, 0x2b, 0x25, 0xfb, 0x25, 0x38, 0x5d, 0xf1, - 0xeb, 0x15, 0x3f, 0x88, 0x56, 0xfc, 0xe0, 0x9e, 0x13, 0xd4, 0xe5, 0x4a, 0x99, 0x93, 0x79, 0x5e, - 0xe8, 0x51, 0x38, 0xc8, 0x0f, 0x0a, 0x23, 0xdb, 0xd8, 0x73, 0x8c, 0xf9, 0x3a, 0xa4, 0x87, 0x4d, - 0x8d, 0xb1, 0x01, 0x2a, 0xa3, 0xc7, 0x35, 0x27, 0x22, 0xe8, 0x16, 0xcb, 0x54, 0x1c, 0xdf, 0x88, - 0xa2, 0xfa, 0x13, 0x5a, 0xa6, 0xe2, 0x18, 0x98, 0x7a, 0x85, 0x9a, 0xf5, 0xed, 0xff, 0x3e, 0xc8, - 0x0e, 0xc7, 0x44, 0x56, 0x08, 0xf4, 0x59, 0x18, 0x0f, 0xc9, 0x4d, 0xd7, 0x6b, 0xdf, 0x97, 0x32, - 0x81, 0x2e, 0x3e, 0x52, 0xd5, 0x65, 0x1d, 0x93, 0x4b, 0x16, 0xcd, 0x32, 0x9c, 0xa0, 0x86, 0x9a, - 0x30, 0x7e, 0xcf, 0xf5, 0xea, 0xfe, 0xbd, 0x50, 0xd2, 0x2f, 0x64, 0x0b, 0x18, 0xef, 0x72, 0xcc, - 0x44, 0x1f, 0x8d, 0xe6, 0xee, 0x1a, 0xc4, 0x70, 0x82, 0x38, 0x5d, 0x80, 0x41, 0xdb, 0x5b, 0x08, - 0x6f, 0x87, 0x24, 0x10, 0x39, 0xa7, 0xd9, 0x02, 0xc4, 0xb2, 0x10, 0xc7, 0x70, 0xba, 0x00, 0xd9, - 0x9f, 0x6b, 0x81, 0xdf, 0xe6, 0x31, 0xf6, 0xc5, 0x02, 0xc4, 0xaa, 0x14, 0x6b, 0x18, 0x74, 0x83, - 0xb2, 0x7f, 0x6b, 0xbe, 0x87, 0x7d, 0x3f, 0x92, 0x5b, 0x9a, 0x65, 0x39, 0xd5, 0xca, 0xb1, 0x81, - 0x85, 0x56, 0x00, 0x85, 0xed, 0x56, 0xab, 0xc1, 0x8c, 0x2f, 0x9c, 0x06, 0x23, 0xc5, 0x15, 0xdf, - 0x79, 0x1e, 0x7a, 0xb4, 0xda, 0x01, 0xc5, 0x29, 0x35, 0xe8, 0x59, 0xbd, 0x29, 0xba, 0x3a, 0xc8, - 0xba, 0xca, 0x95, 0x11, 0x55, 0xde, 0x4f, 0x09, 0x43, 0xcb, 0x30, 0x1c, 0xee, 0x85, 0xb5, 0x48, - 0xc4, 0x50, 0xcb, 0x48, 0xfc, 0x53, 0x65, 0x28, 0x5a, 0xde, 0x39, 0x5e, 0x05, 0xcb, 0xba, 0xa8, - 0x06, 0xd3, 0x82, 0xe2, 0xd2, 0xb6, 0xe3, 0xa9, 0x34, 0x2a, 0xdc, 0x06, 0xf5, 0x99, 0x07, 0xfb, - 0x73, 0xd3, 
0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x3f, 0x77, 0xa6, 0xe2, 0xd7, 0x53, 0x20, 0x38, 0x8d, - 0x1a, 0x5f, 0x7c, 0xb5, 0x9a, 0xdf, 0x6c, 0x55, 0x02, 0x7f, 0xd3, 0x6d, 0x90, 0x6e, 0x0a, 0x9d, - 0xaa, 0x81, 0x29, 0x16, 0x9f, 0x51, 0x86, 0x13, 0xd4, 0xec, 0x6f, 0x67, 0xfc, 0x0c, 0x4b, 0xb3, - 0x1c, 0xb5, 0x03, 0x82, 0x9a, 0x30, 0xd6, 0x62, 0xdb, 0x44, 0x44, 0xbe, 0x17, 0x6b, 0xfd, 0xf9, - 0x3e, 0x05, 0x13, 0xf7, 0xe8, 0x35, 0xa0, 0x04, 0x87, 0xec, 0xc5, 0x57, 0xd1, 0xc9, 0x61, 0x93, - 0xba, 0xfd, 0x63, 0x8f, 0xb0, 0x1b, 0xb1, 0xca, 0xa5, 0x0d, 0xc3, 0xc2, 0xe4, 0x5d, 0x3c, 0xad, - 0x66, 0xb3, 0xc5, 0x5e, 0xf1, 0xb4, 0x08, 0xb3, 0x79, 0x2c, 0xeb, 0xa2, 0xcf, 0xc0, 0x38, 0x7d, - 0xa9, 0x68, 0xf9, 0x4b, 0x4e, 0x65, 0x87, 0x26, 0x88, 0xd3, 0x96, 0x68, 0x59, 0x31, 0xf4, 0xca, - 0x38, 0x41, 0x0c, 0xbd, 0xc1, 0x8c, 0x33, 0xcc, 0xd4, 0x28, 0x3d, 0x48, 0xeb, 0x76, 0x18, 0x92, - 0xac, 0x46, 0x24, 0x2b, 0xed, 0x8a, 0x7d, 0xbc, 0x69, 0x57, 0xd0, 0x4d, 0x18, 0x13, 0xb9, 0x86, - 0xc5, 0xca, 0xcd, 0x1b, 0xd2, 0xb8, 0x31, 0xac, 0x03, 0x0f, 0x92, 0x05, 0xd8, 0xac, 0x8c, 0xb6, - 0xe0, 0xbc, 0x96, 0xfb, 0xe7, 0x5a, 0xe0, 0x30, 0x95, 0xba, 0xcb, 0x8e, 0x53, 0xed, 0xae, 0x7e, - 0xec, 0xc1, 0xfe, 0xdc, 0xf9, 0xf5, 0x6e, 0x88, 0xb8, 0x3b, 0x1d, 0x74, 0x0b, 0x4e, 0x73, 0xc7, - 0xda, 0x12, 0x71, 0xea, 0x0d, 0xd7, 0x53, 0xcc, 0x00, 0xdf, 0xf2, 0x67, 0x1f, 0xec, 0xcf, 0x9d, - 0x5e, 0x48, 0x43, 0xc0, 0xe9, 0xf5, 0xd0, 0xab, 0x50, 0xac, 0x7b, 0xa1, 0x18, 0x83, 0x21, 0x23, - 0xbd, 0x52, 0xb1, 0xb4, 0x56, 0x55, 0xdf, 0x1f, 0xff, 0xc1, 0x71, 0x05, 0xb4, 0xc5, 0x25, 0xb6, - 0x4a, 0x40, 0x32, 0xdc, 0x11, 0x12, 0x28, 0x29, 0x6a, 0x33, 0x5c, 0xeb, 0xb8, 0xaa, 0x42, 0x59, - 0x9c, 0x1b, 0x5e, 0x77, 0x06, 0x61, 0xf4, 0x3a, 0x20, 0xfa, 0x82, 0x70, 0x6b, 0x64, 0xa1, 0xc6, - 0xd2, 0x2a, 0x30, 0x01, 0x77, 0xc1, 0x74, 0xf6, 0xaa, 0x76, 0x60, 0xe0, 0x94, 0x5a, 0xe8, 0x3a, - 0x3d, 0x55, 0xf4, 0x52, 0x71, 0x6a, 0xa9, 0x64, 0x78, 0x25, 0xd2, 0x0a, 0x48, 0xcd, 0x89, 0x48, - 0xdd, 0xa4, 0x88, 0x13, 0xf5, 0x50, 0x1d, 0xce, 0x39, 0xed, 0xc8, 0x67, 0xc2, 0x70, 0x13, 0x75, - 0xdd, 0xdf, 0x21, 0x1e, 0xd3, 0x43, 0x15, 0x16, 0x2f, 0x52, 0x6e, 0x63, 0xa1, 0x0b, 0x1e, 0xee, - 0x4a, 0x85, 0x72, 0x89, 0x2a, 0xfb, 0x2d, 0x98, 0x91, 0x8e, 0x52, 0x32, 0xe0, 0xbe, 0x00, 0x23, - 0xdb, 0x7e, 0x18, 0xad, 0x91, 0xe8, 0x9e, 0x1f, 0xec, 0x88, 0x78, 0x95, 0x71, 0x8c, 0xe3, 0x18, - 0x84, 0x75, 0x3c, 0xfa, 0x0c, 0x64, 0x56, 0x12, 0xe5, 0x12, 0x53, 0x50, 0x17, 0xe2, 0x33, 0xe6, - 0x3a, 0x2f, 0xc6, 0x12, 0x2e, 0x51, 0xcb, 0x95, 0x25, 0xa6, 0x6c, 0x4e, 0xa0, 0x96, 0x2b, 0x4b, - 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9d, 0x80, 0x54, 0x02, 0xbf, 0x46, 0x42, 0x2d, 0xb2, 0xf6, - 0xa3, 0x3c, 0x1a, 0x27, 0x5d, 0xae, 0xd5, 0x34, 0x04, 0x9c, 0x5e, 0x0f, 0x91, 0xce, 0xbc, 0x57, - 0xe3, 0xd9, 0x5a, 0x82, 0x4e, 0x7e, 0xa6, 0xcf, 0xd4, 0x57, 0x1e, 0x4c, 0xaa, 0x8c, 0x5b, 0x3c, - 0xfe, 0x66, 0x38, 0x33, 0xc1, 0xd6, 0x76, 0xff, 0xc1, 0x3b, 0x95, 0xde, 0xa5, 0x9c, 0xa0, 0x84, - 0x3b, 0x68, 0x1b, 0xc1, 0xac, 0x26, 0x7b, 0xa6, 0x43, 0xbe, 0x0a, 0xc5, 0xb0, 0xbd, 0x51, 0xf7, - 0x9b, 0x8e, 0xeb, 0x31, 0x65, 0xb3, 0xf6, 0x1e, 0xa9, 0x4a, 0x00, 0x8e, 0x71, 0xd0, 0x0a, 0x14, - 0x1c, 0xa9, 0x54, 0x41, 0xd9, 0x31, 0x50, 0x94, 0x2a, 0x85, 0x87, 0x05, 0x90, 0x6a, 0x14, 0x55, - 0x17, 0xbd, 0x02, 0x63, 0xc2, 0x31, 0x54, 0x24, 0x7b, 0x9c, 0x36, 0xbd, 0x77, 0xaa, 0x3a, 0x10, - 0x9b, 0xb8, 0xe8, 0x36, 0x8c, 0x44, 0x7e, 0x83, 0xb9, 0xa0, 0x50, 0x36, 0xef, 0x4c, 0x76, 0x1c, - 0xb5, 0x75, 0x85, 0xa6, 0xcb, 0x33, 0x55, 0x55, 0xac, 0xd3, 0x41, 0xeb, 0x7c, 0xbd, 0xb3, 0x08, - 0xd3, 0x24, 0x9c, 0x79, 0x24, 0xfb, 
0x4e, 0x52, 0x81, 0xa8, 0xcd, 0xed, 0x20, 0x6a, 0x62, 0x9d, - 0x0c, 0xba, 0x06, 0x53, 0xad, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0xfa, 0xb4, 0x19, 0x33, 0xbd, 0x4d, - 0x25, 0x89, 0x80, 0x3b, 0xeb, 0x30, 0xbf, 0x5e, 0x51, 0x38, 0x73, 0x96, 0xe7, 0x83, 0xe6, 0xcf, - 0x3b, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb2, 0x93, 0x98, 0x4b, 0x26, 0x66, 0x66, 0xb3, 0xc3, 0xae, - 0xe8, 0x12, 0x0c, 0xce, 0xbc, 0xaa, 0xbf, 0x38, 0xa6, 0x80, 0xea, 0x5a, 0xe2, 0x40, 0xfa, 0x62, - 0x08, 0x67, 0xce, 0x75, 0x31, 0x55, 0x4b, 0x3c, 0x2f, 0x62, 0x86, 0xc0, 0x28, 0x0e, 0x71, 0x82, - 0x26, 0xfa, 0x24, 0x4c, 0x8a, 0x30, 0x6f, 0xf1, 0x30, 0x9d, 0x8f, 0x0d, 0x7b, 0x71, 0x02, 0x86, - 0x3b, 0xb0, 0x79, 0xe4, 0x7d, 0x67, 0xa3, 0x41, 0xc4, 0xd1, 0x77, 0xd3, 0xf5, 0x76, 0xc2, 0x99, - 0x0b, 0xec, 0x7c, 0x10, 0x91, 0xf7, 0x93, 0x50, 0x9c, 0x52, 0x03, 0xad, 0xc3, 0x64, 0x2b, 0x20, - 0xa4, 0xc9, 0x18, 0x7d, 0x71, 0x9f, 0xcd, 0x71, 0xb7, 0x76, 0xda, 0x93, 0x4a, 0x02, 0x76, 0x90, - 0x52, 0x86, 0x3b, 0x28, 0xa0, 0x7b, 0x50, 0xf0, 0x77, 0x49, 0xb0, 0x4d, 0x9c, 0xfa, 0xcc, 0xc5, - 0x2e, 0x86, 0xe6, 0xe2, 0x72, 0xbb, 0x25, 0x70, 0x13, 0x3a, 0x78, 0x59, 0xdc, 0x5b, 0x07, 0x2f, - 0x1b, 0x43, 0x3f, 0x64, 0xc1, 0x59, 0x29, 0xb6, 0xaf, 0xb6, 0xe8, 0xa8, 0x2f, 0xf9, 0x5e, 0x18, - 0x05, 0xdc, 0x11, 0xfb, 0xb1, 0x6c, 0xe7, 0xe4, 0xf5, 0x8c, 0x4a, 0x4a, 0x38, 0x7a, 0x36, 0x0b, - 0x23, 0xc4, 0xd9, 0x2d, 0xa2, 0x25, 0x98, 0x0a, 0x49, 0x24, 0x0f, 0xa3, 0x85, 0x70, 0xe5, 0x8d, - 0xd2, 0xda, 0xcc, 0x25, 0xee, 0x45, 0x4e, 0x37, 0x43, 0x35, 0x09, 0xc4, 0x9d, 0xf8, 0xb3, 0xdf, - 0x0a, 0x53, 0x1d, 0xd7, 0xff, 0x61, 0x32, 0x8a, 0xcc, 0xee, 0xc0, 0x98, 0x31, 0xc4, 0xc7, 0xaa, - 0xc3, 0xfd, 0x97, 0xc3, 0x50, 0x54, 0xfa, 0x3d, 0x74, 0xd5, 0x54, 0xdb, 0x9e, 0x4d, 0xaa, 0x6d, - 0x0b, 0xf4, 0x5d, 0xaf, 0x6b, 0x6a, 0xd7, 0x53, 0x62, 0x67, 0x65, 0x6d, 0xe8, 0xfe, 0x9d, 0xa2, - 0x35, 0x71, 0x6d, 0xbe, 0x6f, 0xfd, 0xef, 0x40, 0x57, 0x09, 0xf0, 0x35, 0x98, 0xf2, 0x7c, 0xc6, - 0x73, 0x92, 0xba, 0x64, 0x28, 0x18, 0xdf, 0x50, 0xd4, 0x83, 0x51, 0x24, 0x10, 0x70, 0x67, 0x1d, - 0xda, 0x20, 0xbf, 0xf8, 0x93, 0x22, 0x67, 0xce, 0x17, 0x60, 0x01, 0x45, 0x97, 0x60, 0xb0, 0xe5, - 0xd7, 0xcb, 0x15, 0xc1, 0x6f, 0x6a, 0xa9, 0x6e, 0xeb, 0xe5, 0x0a, 0xe6, 0x30, 0xb4, 0x00, 0x43, - 0xec, 0x47, 0x38, 0x33, 0x9a, 0x1d, 0x75, 0x80, 0xd5, 0xd0, 0xf2, 0xb5, 0xb0, 0x0a, 0x58, 0x54, - 0x64, 0xa2, 0x2f, 0xca, 0xa4, 0x33, 0xd1, 0xd7, 0xf0, 0x43, 0x8a, 0xbe, 0x24, 0x01, 0x1c, 0xd3, - 0x42, 0xf7, 0xe1, 0xb4, 0xf1, 0x30, 0xe2, 0x4b, 0x84, 0x84, 0xc2, 0xf3, 0xf9, 0x52, 0xd7, 0x17, - 0x91, 0xd0, 0x17, 0x9f, 0x17, 0x9d, 0x3e, 0x5d, 0x4e, 0xa3, 0x84, 0xd3, 0x1b, 0x40, 0x0d, 0x98, - 0xaa, 0x75, 0xb4, 0x5a, 0xe8, 0xbf, 0x55, 0x35, 0xa1, 0x9d, 0x2d, 0x76, 0x12, 0x46, 0xaf, 0x40, - 0xe1, 0x1d, 0x3f, 0x64, 0x67, 0xb5, 0xe0, 0x91, 0xa5, 0xdb, 0x6c, 0xe1, 0x8d, 0x5b, 0x55, 0x56, - 0x7e, 0xb0, 0x3f, 0x37, 0x52, 0xf1, 0xeb, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0xf7, 0x5a, 0x30, 0xdb, - 0xf9, 0xf2, 0x52, 0x9d, 0x1e, 0xeb, 0xbf, 0xd3, 0xb6, 0x68, 0x74, 0x76, 0x39, 0x93, 0x1c, 0xee, - 0xd2, 0x94, 0xfd, 0xcb, 0x5c, 0xb7, 0x2b, 0x34, 0x40, 0x24, 0x6c, 0x37, 0x4e, 0x22, 0x4d, 0xe5, - 0xb2, 0xa1, 0x9c, 0x7a, 0x68, 0xfb, 0x81, 0x7f, 0x66, 0x31, 0xfb, 0x81, 0x13, 0x74, 0x14, 0x78, - 0x03, 0x0a, 0x91, 0x4c, 0x36, 0xda, 0x25, 0xb3, 0xa6, 0xd6, 0x29, 0x66, 0x43, 0xa1, 0x38, 0x56, - 0x95, 0x57, 0x54, 0x91, 0xb1, 0xff, 0x21, 0x9f, 0x01, 0x09, 0x39, 0x01, 0x1d, 0x40, 0xc9, 0xd4, - 0x01, 0xcc, 0xf5, 0xf8, 0x82, 0x0c, 0x5d, 0xc0, 0x3f, 0x30, 0xfb, 0xcd, 0x24, 0x35, 0xef, 0x77, - 0xc3, 0x15, 0xfb, 0x47, 0x2c, 0x38, 0x95, 0x66, 0xe9, 0x49, 
0x5f, 0x19, 0x5c, 0x4e, 0xa4, 0x0c, - 0x79, 0xd4, 0x08, 0xde, 0x11, 0xe5, 0x58, 0x61, 0xf4, 0x9d, 0xed, 0xea, 0x70, 0xd1, 0x5f, 0x6f, - 0xc1, 0x58, 0x25, 0x20, 0xda, 0x85, 0xf6, 0x1a, 0x77, 0xa3, 0xe6, 0xfd, 0x79, 0xea, 0xd0, 0x2e, - 0xd4, 0xf6, 0xcf, 0xe4, 0xe0, 0x14, 0xd7, 0xc4, 0x2f, 0xec, 0xfa, 0x6e, 0xbd, 0xe2, 0xd7, 0x45, - 0xa6, 0xb2, 0x37, 0x61, 0xb4, 0xa5, 0x09, 0xf7, 0xba, 0x45, 0x32, 0xd4, 0x85, 0x80, 0xb1, 0x38, - 0x42, 0x2f, 0xc5, 0x06, 0x2d, 0x54, 0x87, 0x51, 0xb2, 0xeb, 0xd6, 0x94, 0x3a, 0x37, 0x77, 0xe8, - 0xcb, 0x45, 0xb5, 0xb2, 0xac, 0xd1, 0xc1, 0x06, 0xd5, 0x63, 0xc8, 0x41, 0x6b, 0xff, 0xa8, 0x05, - 0x8f, 0x64, 0xc4, 0x3d, 0xa4, 0xcd, 0xdd, 0x63, 0x36, 0x0f, 0x22, 0x9d, 0xa5, 0x6a, 0x8e, 0x5b, - 0x42, 0x60, 0x01, 0x45, 0x9f, 0x02, 0xe0, 0x96, 0x0c, 0xf4, 0x99, 0xdb, 0x2b, 0x40, 0x9c, 0x11, - 0xdb, 0x4a, 0x0b, 0x53, 0x24, 0xeb, 0x63, 0x8d, 0x96, 0xfd, 0x53, 0x79, 0x18, 0xe4, 0xe9, 0xc4, - 0x57, 0x60, 0x78, 0x9b, 0xe7, 0x6f, 0xe8, 0x27, 0x55, 0x44, 0x2c, 0x80, 0xe0, 0x05, 0x58, 0x56, - 0x46, 0xab, 0x30, 0xcd, 0xf3, 0x5f, 0x34, 0x4a, 0xa4, 0xe1, 0xec, 0x49, 0x69, 0x19, 0xcf, 0x1d, - 0xa9, 0xa4, 0x86, 0xe5, 0x4e, 0x14, 0x9c, 0x56, 0x0f, 0xbd, 0x06, 0xe3, 0xf4, 0xf5, 0xe2, 0xb7, - 0x23, 0x49, 0x89, 0x67, 0xbe, 0x50, 0xcf, 0xa5, 0x75, 0x03, 0x8a, 0x13, 0xd8, 0xf4, 0x01, 0xdd, - 0xea, 0x90, 0x0b, 0x0e, 0xc6, 0x0f, 0x68, 0x53, 0x16, 0x68, 0xe2, 0x32, 0x13, 0xcf, 0x36, 0x33, - 0x68, 0x5d, 0xdf, 0x0e, 0x48, 0xb8, 0xed, 0x37, 0xea, 0x8c, 0xd1, 0x1a, 0xd4, 0x4c, 0x3c, 0x13, - 0x70, 0xdc, 0x51, 0x83, 0x52, 0xd9, 0x74, 0xdc, 0x46, 0x3b, 0x20, 0x31, 0x95, 0x21, 0x93, 0xca, - 0x4a, 0x02, 0x8e, 0x3b, 0x6a, 0xd0, 0x75, 0x74, 0xba, 0x12, 0xf8, 0xf4, 0xf0, 0x92, 0xc1, 0x5c, - 0x94, 0xdd, 0xee, 0xb0, 0xf4, 0x3b, 0xed, 0x12, 0xf6, 0x4c, 0x58, 0x36, 0x72, 0x0a, 0x86, 0xd2, - 0xbe, 0x2a, 0x3c, 0x4e, 0x25, 0x15, 0xf4, 0x0c, 0x8c, 0x88, 0xac, 0x06, 0xcc, 0xbc, 0x94, 0x4f, - 0x1d, 0x33, 0x32, 0x28, 0xc5, 0xc5, 0x58, 0xc7, 0xb1, 0xbf, 0x2f, 0x07, 0xd3, 0x29, 0xfe, 0x01, - 0xfc, 0xa8, 0xda, 0x72, 0xc3, 0x48, 0xe5, 0xc7, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, - 0x0f, 0xfc, 0x30, 0x4c, 0x1e, 0x80, 0xc2, 0xfe, 0x56, 0x40, 0x0f, 0x99, 0x69, 0xee, 0x22, 0x0c, - 0xb4, 0x43, 0x22, 0x03, 0x16, 0xaa, 0xf3, 0x9b, 0xe9, 0x9e, 0x18, 0x84, 0xb2, 0xc7, 0x5b, 0x4a, - 0x8d, 0xa3, 0xb1, 0xc7, 0x5c, 0x91, 0xc3, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x4c, - 0x74, 0x1c, 0x79, 0x8b, 0x95, 0x62, 0x01, 0xb5, 0xbf, 0x94, 0x87, 0xb3, 0x99, 0x1e, 0x43, 0xb4, - 0xeb, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0x59, 0x6f, 0xf0, 0x68, 0x5b, 0xa4, 0xb5, 0xbd, 0x2a, 0xca, - 0xb1, 0xc2, 0x40, 0x97, 0x61, 0x90, 0x49, 0xae, 0x3a, 0x32, 0x05, 0x2e, 0x96, 0x78, 0xf8, 0x15, - 0x0e, 0xee, 0x3b, 0x0b, 0xeb, 0x25, 0x18, 0x68, 0xf9, 0x7e, 0x23, 0x79, 0x68, 0xd1, 0xee, 0xfa, - 0x7e, 0x03, 0x33, 0x20, 0xfa, 0x88, 0x18, 0xaf, 0x84, 0xb9, 0x02, 0x76, 0xea, 0x7e, 0xa8, 0x0d, - 0xda, 0x13, 0x30, 0xbc, 0x43, 0xf6, 0x02, 0xd7, 0xdb, 0x4a, 0x9a, 0xb1, 0xdc, 0xe0, 0xc5, 0x58, - 0xc2, 0xcd, 0xa4, 0x4f, 0xc3, 0x47, 0x9d, 0x3e, 0xb5, 0xd0, 0xf3, 0x0a, 0xfc, 0x81, 0x3c, 0x4c, - 0xe0, 0xc5, 0xd2, 0x07, 0x13, 0x71, 0xbb, 0x73, 0x22, 0x8e, 0x3a, 0x7d, 0x6a, 0xef, 0xd9, 0xf8, - 0x05, 0x0b, 0x26, 0x58, 0x6e, 0x05, 0x11, 0xa7, 0xc9, 0xf5, 0xbd, 0x13, 0x60, 0xf1, 0x2e, 0xc1, - 0x60, 0x40, 0x1b, 0x4d, 0xa6, 0x08, 0x64, 0x3d, 0xc1, 0x1c, 0x86, 0xce, 0xc1, 0x00, 0xeb, 0x02, - 0x9d, 0xbc, 0x51, 0x9e, 0x5d, 0xa9, 0xe4, 0x44, 0x0e, 0x66, 0xa5, 0x2c, 0xf8, 0x08, 0x26, 0xad, - 0x86, 0xcb, 0x3b, 0x1d, 0xeb, 0x15, 0xdf, 0x1f, 0xbe, 0xc4, 0xa9, 0x5d, 0x7b, 0x6f, 
0xc1, 0x47, - 0xd2, 0x49, 0x76, 0x7f, 0x3e, 0xfd, 0x51, 0x0e, 0x2e, 0xa4, 0xd6, 0xeb, 0x3b, 0xf8, 0x48, 0xf7, - 0xda, 0xc7, 0x19, 0x83, 0x3f, 0x7f, 0x82, 0x46, 0x82, 0x03, 0xfd, 0x72, 0x98, 0x83, 0x7d, 0xc4, - 0x04, 0x49, 0x1d, 0xb2, 0xf7, 0x49, 0x4c, 0x90, 0xd4, 0xbe, 0x65, 0x3c, 0xff, 0xfe, 0x22, 0x97, - 0xf1, 0x2d, 0xec, 0x21, 0x78, 0x85, 0x9e, 0x33, 0x0c, 0x18, 0x0a, 0x8e, 0x79, 0x94, 0x9f, 0x31, - 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x80, 0x89, 0xa6, 0xeb, 0xd1, 0xc3, 0x67, 0xcf, 0x64, 0xfc, 0x54, - 0xc8, 0xa6, 0x55, 0x13, 0x8c, 0x93, 0xf8, 0xc8, 0xd5, 0xe2, 0x85, 0xe4, 0xb2, 0x93, 0x6e, 0x67, - 0xf6, 0x76, 0xde, 0xd4, 0xb9, 0xaa, 0x51, 0x4c, 0x89, 0x1d, 0xb2, 0xaa, 0xbd, 0xff, 0xf3, 0xfd, - 0xbf, 0xff, 0x47, 0xd3, 0xdf, 0xfe, 0xb3, 0xaf, 0xc0, 0xd8, 0x43, 0x0b, 0x7c, 0xed, 0xaf, 0xe6, - 0xe1, 0xd1, 0x2e, 0xdb, 0x9e, 0x9f, 0xf5, 0xc6, 0x1c, 0x68, 0x67, 0x7d, 0xc7, 0x3c, 0x54, 0xe0, - 0xd4, 0x66, 0xbb, 0xd1, 0xd8, 0x63, 0x76, 0xf8, 0xa4, 0x2e, 0x31, 0x04, 0x4f, 0x79, 0x4e, 0xe6, - 0xb3, 0x5a, 0x49, 0xc1, 0xc1, 0xa9, 0x35, 0x29, 0x43, 0x4f, 0x6f, 0x92, 0x3d, 0x45, 0x2a, 0xc1, - 0xd0, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0d, 0xa6, 0x9c, 0x5d, 0xc7, 0xe5, 0x41, 0x57, 0x25, - 0x01, 0xce, 0xd1, 0x2b, 0x39, 0xdd, 0x42, 0x12, 0x01, 0x77, 0xd6, 0x41, 0xaf, 0x03, 0xf2, 0x45, - 0xee, 0xff, 0x6b, 0xc4, 0x13, 0xaa, 0x31, 0x36, 0x77, 0xf9, 0xf8, 0x48, 0xb8, 0xd5, 0x81, 0x81, - 0x53, 0x6a, 0x25, 0xe2, 0x6f, 0x0c, 0x65, 0xc7, 0xdf, 0xe8, 0x7e, 0x2e, 0xf6, 0x4c, 0xff, 0xf0, - 0x9f, 0x2c, 0x7a, 0x7d, 0x71, 0x26, 0xdf, 0x0c, 0x23, 0xf7, 0x0a, 0x33, 0x6d, 0xe3, 0x32, 0x3c, - 0x2d, 0x6a, 0xc4, 0x69, 0xcd, 0xb4, 0x2d, 0x06, 0x62, 0x13, 0x97, 0x2f, 0x88, 0x30, 0x76, 0x56, - 0x34, 0x58, 0x7c, 0x11, 0xeb, 0x46, 0x61, 0xa0, 0x4f, 0xc3, 0x70, 0xdd, 0xdd, 0x75, 0x43, 0x3f, - 0x10, 0x2b, 0xfd, 0x90, 0xea, 0x82, 0xf8, 0x1c, 0x2c, 0x71, 0x32, 0x58, 0xd2, 0xb3, 0x7f, 0x20, - 0x07, 0x63, 0xb2, 0xc5, 0x37, 0xda, 0x7e, 0xe4, 0x9c, 0xc0, 0xb5, 0x7c, 0xcd, 0xb8, 0x96, 0x3f, - 0xd2, 0x2d, 0xe0, 0x0f, 0xeb, 0x52, 0xe6, 0x75, 0x7c, 0x2b, 0x71, 0x1d, 0x3f, 0xde, 0x9b, 0x54, - 0xf7, 0x6b, 0xf8, 0x1f, 0x59, 0x30, 0x65, 0xe0, 0x9f, 0xc0, 0x6d, 0xb0, 0x62, 0xde, 0x06, 0x8f, - 0xf5, 0xfc, 0x86, 0x8c, 0x5b, 0xe0, 0xbb, 0xf3, 0x89, 0xbe, 0xb3, 0xd3, 0xff, 0x1d, 0x18, 0xd8, - 0x76, 0x82, 0x7a, 0xb7, 0x00, 0xe7, 0x1d, 0x95, 0xe6, 0xaf, 0x3b, 0x81, 0xd0, 0x0d, 0x3e, 0xa5, - 0x72, 0x5e, 0x3b, 0x41, 0x6f, 0xbd, 0x20, 0x6b, 0x0a, 0xbd, 0x04, 0x43, 0x61, 0xcd, 0x6f, 0x29, - 0xcb, 0xf9, 0x8b, 0x3c, 0x1f, 0x36, 0x2d, 0x39, 0xd8, 0x9f, 0x43, 0x66, 0x73, 0xb4, 0x18, 0x0b, - 0x7c, 0xf4, 0x26, 0x8c, 0xb1, 0x5f, 0xca, 0x50, 0x27, 0x9f, 0x9d, 0x0c, 0xa9, 0xaa, 0x23, 0x72, - 0x2b, 0x36, 0xa3, 0x08, 0x9b, 0xa4, 0x66, 0xb7, 0xa0, 0xa8, 0x3e, 0xeb, 0x58, 0xf5, 0x71, 0xff, - 0x2e, 0x0f, 0xd3, 0x29, 0x6b, 0x0e, 0x85, 0xc6, 0x4c, 0x3c, 0xd3, 0xe7, 0x52, 0x7d, 0x8f, 0x73, - 0x11, 0xb2, 0xd7, 0x50, 0x5d, 0xac, 0xad, 0xbe, 0x1b, 0xbd, 0x1d, 0x92, 0x64, 0xa3, 0xb4, 0xa8, - 0x77, 0xa3, 0xb4, 0xb1, 0x13, 0x1b, 0x6a, 0xda, 0x90, 0xea, 0xe9, 0xb1, 0xce, 0xe9, 0x9f, 0xe6, - 0xe1, 0x54, 0x5a, 0x0c, 0x32, 0xf4, 0xf9, 0x44, 0x62, 0xbc, 0xe7, 0xfb, 0x8d, 0x5e, 0xc6, 0xb3, - 0xe5, 0x71, 0x19, 0xf0, 0xe2, 0xbc, 0x99, 0x2a, 0xaf, 0xe7, 0x30, 0x8b, 0x36, 0x99, 0x23, 0x7e, - 0xc0, 0x13, 0x1a, 0xca, 0xe3, 0xe3, 0xe3, 0x7d, 0x77, 0x40, 0x64, 0x42, 0x0c, 0x13, 0x46, 0x00, - 0xb2, 0xb8, 0xb7, 0x11, 0x80, 0x6c, 0x79, 0xd6, 0x85, 0x11, 0xed, 0x6b, 0x8e, 0x75, 0xc6, 0x77, - 0xe8, 0x6d, 0xa5, 0xf5, 0xfb, 0x58, 0x67, 0xfd, 0x47, 0x2d, 0x48, 0xd8, 0x85, 0x2b, 0xb1, 0x98, - 0x95, 0x29, 
0x16, 0xbb, 0x08, 0x03, 0x81, 0xdf, 0x20, 0xc9, 0x3c, 0x74, 0xd8, 0x6f, 0x10, 0xcc, - 0x20, 0x14, 0x23, 0x8a, 0x85, 0x1d, 0xa3, 0xfa, 0x43, 0x4e, 0x3c, 0xd1, 0x2e, 0xc1, 0x60, 0x83, - 0xec, 0x92, 0x46, 0x32, 0x5d, 0xc8, 0x4d, 0x5a, 0x88, 0x39, 0xcc, 0xfe, 0x85, 0x01, 0x38, 0xdf, - 0x35, 0x94, 0x05, 0x7d, 0x0e, 0x6d, 0x39, 0x11, 0xb9, 0xe7, 0xec, 0x25, 0xe3, 0xfa, 0x5f, 0xe3, - 0xc5, 0x58, 0xc2, 0x99, 0xe7, 0x0e, 0x0f, 0xcf, 0x9b, 0x10, 0x22, 0x8a, 0xa8, 0xbc, 0x02, 0x6a, - 0x0a, 0xa5, 0xf2, 0x47, 0x21, 0x94, 0x7a, 0x16, 0x20, 0x0c, 0x1b, 0xdc, 0x7a, 0xa6, 0x2e, 0x5c, - 0x82, 0xe2, 0x30, 0xce, 0xd5, 0x9b, 0x02, 0x82, 0x35, 0x2c, 0x54, 0x82, 0xc9, 0x56, 0xe0, 0x47, - 0x5c, 0x26, 0x5b, 0xe2, 0x06, 0x66, 0x83, 0x66, 0x14, 0x81, 0x4a, 0x02, 0x8e, 0x3b, 0x6a, 0xa0, - 0x17, 0x60, 0x44, 0x44, 0x16, 0xa8, 0xf8, 0x7e, 0x43, 0x88, 0x81, 0x94, 0xcd, 0x55, 0x35, 0x06, - 0x61, 0x1d, 0x4f, 0xab, 0xc6, 0x04, 0xbd, 0xc3, 0xa9, 0xd5, 0xb8, 0xb0, 0x57, 0xc3, 0x4b, 0xc4, - 0x23, 0x2c, 0xf4, 0x15, 0x8f, 0x30, 0x16, 0x8c, 0x15, 0xfb, 0xd6, 0x6d, 0x41, 0x4f, 0x51, 0xd2, - 0xcf, 0x0e, 0xc0, 0xb4, 0x58, 0x38, 0xc7, 0xbd, 0x5c, 0x6e, 0x77, 0x2e, 0x97, 0xa3, 0x10, 0x9d, - 0x7d, 0xb0, 0x66, 0x4e, 0x7a, 0xcd, 0xfc, 0xa0, 0x05, 0x26, 0x7b, 0x85, 0xfe, 0xdf, 0xcc, 0xc4, - 0x28, 0x2f, 0x64, 0xb2, 0x6b, 0x75, 0x79, 0x81, 0xbc, 0xc7, 0x14, 0x29, 0xf6, 0x7f, 0xb0, 0xe0, - 0xb1, 0x9e, 0x14, 0xd1, 0x32, 0x14, 0x19, 0x0f, 0xa8, 0xbd, 0xce, 0x1e, 0x57, 0x06, 0xa8, 0x12, - 0x90, 0xc1, 0x92, 0xc6, 0x35, 0xd1, 0x72, 0x47, 0x06, 0x9a, 0x27, 0x52, 0x32, 0xd0, 0x9c, 0x36, - 0x86, 0xe7, 0x21, 0x53, 0xd0, 0x7c, 0x3f, 0xbd, 0x71, 0x0c, 0xe7, 0x0f, 0xf4, 0x71, 0x43, 0xec, - 0x67, 0x27, 0xc4, 0x7e, 0xc8, 0xc4, 0xd6, 0xee, 0x90, 0x4f, 0xc2, 0x24, 0x0b, 0x39, 0xc4, 0xcc, - 0xa1, 0x85, 0x5b, 0x4a, 0x2e, 0x36, 0x79, 0xbc, 0x99, 0x80, 0xe1, 0x0e, 0x6c, 0xfb, 0x0f, 0xf3, - 0x30, 0xc4, 0xb7, 0xdf, 0x09, 0xbc, 0x09, 0x9f, 0x84, 0xa2, 0xdb, 0x6c, 0xb6, 0x79, 0x52, 0x91, - 0x41, 0xee, 0x8b, 0x4a, 0xe7, 0xa9, 0x2c, 0x0b, 0x71, 0x0c, 0x47, 0x2b, 0x42, 0xe2, 0xdc, 0x25, - 0xaa, 0x21, 0xef, 0xf8, 0x7c, 0xc9, 0x89, 0x1c, 0xce, 0xe0, 0xa8, 0x7b, 0x36, 0x96, 0x4d, 0xa3, - 0xcf, 0x02, 0x84, 0x51, 0xe0, 0x7a, 0x5b, 0xb4, 0x4c, 0x04, 0xf1, 0xfc, 0x68, 0x17, 0x6a, 0x55, - 0x85, 0xcc, 0x69, 0xc6, 0x67, 0x8e, 0x02, 0x60, 0x8d, 0x22, 0x9a, 0x37, 0x6e, 0xfa, 0xd9, 0xc4, - 0xdc, 0x01, 0xa7, 0x1a, 0xcf, 0xd9, 0xec, 0x8b, 0x50, 0x54, 0xc4, 0x7b, 0xc9, 0x9f, 0x46, 0x75, - 0xb6, 0xe8, 0x13, 0x30, 0x91, 0xe8, 0xdb, 0xa1, 0xc4, 0x57, 0xbf, 0x68, 0xc1, 0x04, 0xef, 0xcc, - 0xb2, 0xb7, 0x2b, 0x6e, 0x83, 0x77, 0xe1, 0x54, 0x23, 0xe5, 0x54, 0x16, 0xd3, 0xdf, 0xff, 0x29, - 0xae, 0xc4, 0x55, 0x69, 0x50, 0x9c, 0xda, 0x06, 0xba, 0x42, 0x77, 0x1c, 0x3d, 0x75, 0x9d, 0x86, - 0x70, 0x4d, 0x1d, 0xe5, 0xbb, 0x8d, 0x97, 0x61, 0x05, 0xb5, 0x7f, 0xc7, 0x82, 0x29, 0xde, 0xf3, - 0x1b, 0x64, 0x4f, 0x9d, 0x4d, 0x5f, 0xcf, 0xbe, 0x8b, 0x74, 0x56, 0xb9, 0x8c, 0x74, 0x56, 0xfa, - 0xa7, 0xe5, 0xbb, 0x7e, 0xda, 0xcf, 0x58, 0x20, 0x56, 0xc8, 0x09, 0x08, 0x21, 0xbe, 0xd5, 0x14, - 0x42, 0xcc, 0x66, 0x6f, 0x82, 0x0c, 0xe9, 0xc3, 0x9f, 0x5b, 0x30, 0xc9, 0x11, 0x62, 0x6d, 0xf9, - 0xd7, 0x75, 0x1e, 0xfa, 0x49, 0x7a, 0x7b, 0x83, 0xec, 0xad, 0xfb, 0x15, 0x27, 0xda, 0x4e, 0xff, - 0x28, 0x63, 0xb2, 0x06, 0xba, 0x4e, 0x56, 0x5d, 0x6e, 0xa0, 0x43, 0x64, 0xd2, 0x3e, 0x74, 0xb6, - 0x07, 0xfb, 0x6b, 0x16, 0x20, 0xde, 0x8c, 0xc1, 0xb8, 0x51, 0x76, 0x88, 0x95, 0x6a, 0x17, 0x5d, - 0x7c, 0x34, 0x29, 0x08, 0xd6, 0xb0, 0x8e, 0x64, 0x78, 0x12, 0x26, 0x0f, 0xf9, 0xde, 0x26, 0x0f, - 0x87, 0x18, 0xd1, 0x7f, 0x35, 0x04, 
0x49, 0x07, 0x18, 0x74, 0x07, 0x46, 0x6b, 0x4e, 0xcb, 0xd9, - 0x70, 0x1b, 0x6e, 0xe4, 0x92, 0xb0, 0x9b, 0xad, 0xd4, 0x92, 0x86, 0x27, 0x94, 0xd4, 0x5a, 0x09, - 0x36, 0xe8, 0xa0, 0x79, 0x80, 0x56, 0xe0, 0xee, 0xba, 0x0d, 0xb2, 0xc5, 0x64, 0x25, 0xcc, 0x19, - 0x9e, 0x1b, 0x00, 0xc9, 0x52, 0xac, 0x61, 0xa4, 0x78, 0x1b, 0xe7, 0x8f, 0xd9, 0xdb, 0x18, 0x4e, - 0xcc, 0xdb, 0x78, 0xe0, 0x50, 0xde, 0xc6, 0x85, 0x43, 0x7b, 0x1b, 0x0f, 0xf6, 0xe5, 0x6d, 0x8c, - 0xe1, 0x8c, 0xe4, 0x3d, 0xe9, 0xff, 0x15, 0xb7, 0x41, 0xc4, 0x83, 0x83, 0x7b, 0xf0, 0xcf, 0x3e, - 0xd8, 0x9f, 0x3b, 0x83, 0x53, 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x29, 0x98, 0x71, 0x1a, 0x0d, 0xff, - 0x9e, 0x9a, 0xd4, 0xe5, 0xb0, 0xe6, 0x34, 0xb8, 0x12, 0x62, 0x98, 0x51, 0x3d, 0xf7, 0x60, 0x7f, - 0x6e, 0x66, 0x21, 0x03, 0x07, 0x67, 0xd6, 0x46, 0xaf, 0x42, 0xb1, 0x15, 0xf8, 0xb5, 0x55, 0xcd, - 0x4b, 0xef, 0x02, 0x1d, 0xc0, 0x8a, 0x2c, 0x3c, 0xd8, 0x9f, 0x1b, 0x53, 0x7f, 0xd8, 0x85, 0x1f, - 0x57, 0x48, 0x71, 0x1f, 0x1e, 0x39, 0x52, 0xf7, 0xe1, 0x1d, 0x98, 0xae, 0x92, 0xc0, 0x65, 0x79, - 0xb7, 0xeb, 0xf1, 0xf9, 0xb4, 0x0e, 0xc5, 0x20, 0x71, 0x22, 0xf7, 0x15, 0x69, 0x50, 0x0b, 0xbb, - 0x2f, 0x4f, 0xe0, 0x98, 0x90, 0xfd, 0xbf, 0x2c, 0x18, 0x16, 0x0e, 0x2f, 0x27, 0xc0, 0x35, 0x2e, - 0x18, 0x9a, 0x84, 0xb9, 0xf4, 0x01, 0x63, 0x9d, 0xc9, 0xd4, 0x21, 0x94, 0x13, 0x3a, 0x84, 0xc7, - 0xba, 0x11, 0xe9, 0xae, 0x3d, 0xf8, 0x6b, 0x79, 0xca, 0xbd, 0x1b, 0xae, 0x97, 0xc7, 0x3f, 0x04, - 0x6b, 0x30, 0x1c, 0x0a, 0xd7, 0xbf, 0x5c, 0xb6, 0xad, 0x7a, 0x72, 0x12, 0x63, 0x3b, 0x36, 0xe1, - 0xec, 0x27, 0x89, 0xa4, 0xfa, 0x14, 0xe6, 0x8f, 0xd1, 0xa7, 0xb0, 0x97, 0x73, 0xea, 0xc0, 0x51, - 0x38, 0xa7, 0xda, 0x5f, 0x61, 0x37, 0xa7, 0x5e, 0x7e, 0x02, 0x4c, 0xd5, 0x35, 0xf3, 0x8e, 0xb5, - 0xbb, 0xac, 0x2c, 0xd1, 0xa9, 0x0c, 0xe6, 0xea, 0xe7, 0x2d, 0x38, 0x9f, 0xf2, 0x55, 0x1a, 0xa7, - 0xf5, 0x14, 0x14, 0x9c, 0x76, 0xdd, 0x55, 0x7b, 0x59, 0xd3, 0x27, 0x2e, 0x88, 0x72, 0xac, 0x30, - 0xd0, 0x12, 0x4c, 0x91, 0xfb, 0x2d, 0x97, 0xab, 0x52, 0x75, 0x63, 0xd3, 0x3c, 0xf7, 0x92, 0x5a, - 0x4e, 0x02, 0x71, 0x27, 0xbe, 0x0a, 0x08, 0x92, 0xcf, 0x0c, 0x08, 0xf2, 0x77, 0x2d, 0x18, 0x51, - 0xce, 0x6f, 0xc7, 0x3e, 0xda, 0x9f, 0x34, 0x47, 0xfb, 0xd1, 0x2e, 0xa3, 0x9d, 0x31, 0xcc, 0xbf, - 0x9d, 0x53, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x07, 0x07, 0xf7, 0x12, 0x14, 0x5a, 0x81, 0x1f, 0xf9, - 0x35, 0xbf, 0x21, 0x18, 0xb8, 0x73, 0x71, 0x64, 0x1c, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x9b, - 0xf2, 0x4e, 0x4e, 0xab, 0x25, 0x01, 0xd2, 0x06, 0x8d, 0xc5, 0x8d, 0x8d, 0x8b, 0xb1, 0x8e, 0xc3, - 0x06, 0xdc, 0x0f, 0x22, 0xc1, 0x67, 0xc5, 0x03, 0xee, 0x07, 0x11, 0x66, 0x10, 0x54, 0x07, 0x88, - 0x9c, 0x60, 0x8b, 0x44, 0xb4, 0x4c, 0x04, 0xef, 0xca, 0x3e, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x77, - 0xbd, 0x28, 0x8c, 0x82, 0xf9, 0xb2, 0x17, 0xdd, 0x0a, 0xf8, 0x13, 0x52, 0x8b, 0x8e, 0xa3, 0x68, - 0x61, 0x8d, 0xae, 0x74, 0xf4, 0x66, 0x6d, 0x0c, 0x9a, 0xc6, 0x0c, 0x6b, 0xa2, 0x1c, 0x2b, 0x0c, - 0xfb, 0x45, 0x76, 0xfb, 0xb0, 0x31, 0x3d, 0x5c, 0x38, 0x99, 0x5f, 0x2e, 0xaa, 0xd9, 0x60, 0x9a, - 0xcc, 0x92, 0x1e, 0xb4, 0xa6, 0xfb, 0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0xb5, 0xe2, 0xc8, 0x36, 0xe8, - 0xdb, 0x3a, 0x0c, 0x54, 0x9e, 0xee, 0x71, 0x6b, 0x1c, 0xc2, 0x24, 0x85, 0x25, 0x91, 0x60, 0x21, - 0xf6, 0xcb, 0x15, 0xb1, 0x2f, 0xb4, 0x24, 0x12, 0x02, 0x80, 0x63, 0x1c, 0x74, 0x55, 0x08, 0x08, - 0xb8, 0x9c, 0xff, 0xd1, 0x84, 0x80, 0x40, 0x7e, 0xbe, 0x26, 0xd5, 0x79, 0x06, 0x46, 0x54, 0x5e, - 0xd8, 0x0a, 0x4f, 0x37, 0x2a, 0x96, 0xcd, 0x72, 0x5c, 0x8c, 0x75, 0x1c, 0xb4, 0x0e, 0x13, 0x21, - 0x97, 0x9b, 0xa9, 0x88, 0xb5, 0x5c, 0xfe, 0xf8, 0x51, 0x69, 
0xd5, 0x53, 0x35, 0xc1, 0x07, 0xac, - 0x88, 0x9f, 0x36, 0xd2, 0xb9, 0x3a, 0x49, 0x02, 0xbd, 0x06, 0xe3, 0x0d, 0xdf, 0xa9, 0x2f, 0x3a, - 0x0d, 0xc7, 0xab, 0xb1, 0xef, 0x2d, 0x98, 0xe9, 0x05, 0x6f, 0x1a, 0x50, 0x9c, 0xc0, 0xa6, 0xcc, - 0x98, 0x5e, 0x22, 0xa2, 0x2c, 0x3b, 0xde, 0x16, 0x09, 0x45, 0x96, 0x4f, 0xc6, 0x8c, 0xdd, 0xcc, - 0xc0, 0xc1, 0x99, 0xb5, 0xd1, 0x4b, 0x30, 0x2a, 0x3f, 0x5f, 0x8b, 0x45, 0x10, 0x3b, 0x32, 0x68, - 0x30, 0x6c, 0x60, 0xa2, 0x7b, 0x70, 0x5a, 0xfe, 0x5f, 0x0f, 0x9c, 0xcd, 0x4d, 0xb7, 0x26, 0x1c, - 0x74, 0xb9, 0x97, 0xe1, 0x82, 0x74, 0x85, 0x5b, 0x4e, 0x43, 0x3a, 0xd8, 0x9f, 0xbb, 0x28, 0x46, - 0x2d, 0x15, 0xce, 0x26, 0x31, 0x9d, 0x3e, 0x5a, 0x85, 0xe9, 0x6d, 0xe2, 0x34, 0xa2, 0xed, 0xa5, - 0x6d, 0x52, 0xdb, 0x91, 0x9b, 0x88, 0x45, 0x38, 0xd0, 0xcc, 0xff, 0xaf, 0x77, 0xa2, 0xe0, 0xb4, - 0x7a, 0xe8, 0x2d, 0x98, 0x69, 0xb5, 0x37, 0x1a, 0x6e, 0xb8, 0xbd, 0xe6, 0x47, 0xcc, 0xb4, 0x47, - 0xa5, 0x99, 0x15, 0xa1, 0x10, 0x54, 0x0c, 0x89, 0x4a, 0x06, 0x1e, 0xce, 0xa4, 0x80, 0xde, 0x85, - 0xd3, 0x89, 0xc5, 0x20, 0x9c, 0xc1, 0xc7, 0xb3, 0x63, 0xd6, 0x57, 0xd3, 0x2a, 0x88, 0xb8, 0x0a, - 0x69, 0x20, 0x9c, 0xde, 0x04, 0x7a, 0x1e, 0x0a, 0x6e, 0x6b, 0xc5, 0x69, 0xba, 0x8d, 0x3d, 0x16, - 0x74, 0xbf, 0xc8, 0x02, 0xd1, 0x17, 0xca, 0x15, 0x5e, 0x76, 0xa0, 0xfd, 0xc6, 0x0a, 0x93, 0x3e, - 0x41, 0xb4, 0xd0, 0xa2, 0xe1, 0xcc, 0x64, 0x6c, 0xb9, 0xac, 0xc5, 0x1f, 0x0d, 0xb1, 0x81, 0xf5, - 0xde, 0x0c, 0xc2, 0xde, 0xa1, 0x95, 0x35, 0x9e, 0x11, 0x7d, 0x0e, 0x46, 0xf5, 0x15, 0x2b, 0xee, - 0xbf, 0xcb, 0xe9, 0x2c, 0x95, 0xb6, 0xb2, 0x39, 0xc7, 0xa9, 0x56, 0xaf, 0x0e, 0xc3, 0x06, 0x45, - 0x9b, 0x40, 0xfa, 0x58, 0xa2, 0x9b, 0x50, 0xa8, 0x35, 0x5c, 0xe2, 0x45, 0xe5, 0x4a, 0xb7, 0xa8, - 0x58, 0x4b, 0x02, 0x47, 0x4c, 0x8e, 0x08, 0x28, 0xce, 0xcb, 0xb0, 0xa2, 0x60, 0xff, 0x5a, 0x0e, - 0xe6, 0x7a, 0x44, 0xa7, 0x4f, 0xe8, 0x2d, 0xac, 0xbe, 0xf4, 0x16, 0x0b, 0x32, 0x41, 0xef, 0x5a, - 0x42, 0x24, 0x92, 0x48, 0xbe, 0x1b, 0x0b, 0x46, 0x92, 0xf8, 0x7d, 0xdb, 0x91, 0xeb, 0xaa, 0x8f, - 0x81, 0x9e, 0x9e, 0x10, 0x86, 0xca, 0x73, 0xb0, 0xff, 0x77, 0x52, 0xa6, 0xfa, 0xca, 0xfe, 0x4a, - 0x0e, 0x4e, 0xab, 0x21, 0xfc, 0xe6, 0x1d, 0xb8, 0xdb, 0x9d, 0x03, 0x77, 0x04, 0xca, 0x3f, 0xfb, - 0x16, 0x0c, 0xf1, 0x30, 0x5f, 0x7d, 0xf0, 0x67, 0x97, 0xcc, 0x88, 0x98, 0x8a, 0x25, 0x30, 0xa2, - 0x62, 0x7e, 0xaf, 0x05, 0x13, 0xeb, 0x4b, 0x95, 0xaa, 0x5f, 0xdb, 0x21, 0xd1, 0x02, 0xe7, 0xa7, - 0xb1, 0xe0, 0xb5, 0xac, 0x87, 0xe4, 0xa1, 0xd2, 0xb8, 0xb3, 0x8b, 0x30, 0xb0, 0xed, 0x87, 0x51, - 0xd2, 0x32, 0xe0, 0xba, 0x1f, 0x46, 0x98, 0x41, 0xec, 0xdf, 0xb5, 0x60, 0x90, 0xa5, 0xa4, 0x97, - 0x52, 0x64, 0x2b, 0x43, 0x8a, 0xdc, 0xcf, 0x77, 0xa1, 0x17, 0x60, 0x88, 0x6c, 0x6e, 0x92, 0x5a, - 0x24, 0x66, 0x55, 0xba, 0x72, 0x0f, 0x2d, 0xb3, 0x52, 0xca, 0x60, 0xb0, 0xc6, 0xf8, 0x5f, 0x2c, - 0x90, 0xd1, 0x5d, 0x28, 0x46, 0x6e, 0x93, 0x2c, 0xd4, 0xeb, 0x42, 0xb7, 0xfa, 0x10, 0xee, 0xe8, - 0xeb, 0x92, 0x00, 0x8e, 0x69, 0xd9, 0x5f, 0xca, 0x01, 0xc4, 0xf1, 0x51, 0x7a, 0x7d, 0xe2, 0x62, - 0x87, 0xd6, 0xed, 0x72, 0x8a, 0xd6, 0x0d, 0xc5, 0x04, 0x53, 0x54, 0x6e, 0x6a, 0x98, 0xf2, 0x7d, - 0x0d, 0xd3, 0xc0, 0x61, 0x86, 0x69, 0x09, 0xa6, 0xe2, 0xf8, 0x2e, 0x66, 0x78, 0x2b, 0xf6, 0x86, - 0x5a, 0x4f, 0x02, 0x71, 0x27, 0xbe, 0x4d, 0xe0, 0xa2, 0x0a, 0x73, 0x21, 0xee, 0x1a, 0x66, 0xba, - 0xab, 0x6b, 0x31, 0x7b, 0x8c, 0x53, 0xac, 0x56, 0xcc, 0x65, 0xaa, 0x15, 0x7f, 0xc2, 0x82, 0x53, - 0xc9, 0x76, 0x98, 0x2f, 0xe5, 0x17, 0x2d, 0x38, 0xcd, 0x94, 0xab, 0xac, 0xd5, 0x4e, 0x55, 0xee, - 0xf3, 0x5d, 0x43, 0x77, 0x64, 0xf4, 0x38, 0x8e, 0x19, 0xb0, 0x9a, 0x46, 0x1a, 0xa7, 
0xb7, 0x68, - 0xff, 0xfb, 0x1c, 0xcc, 0x64, 0xc5, 0xfc, 0x60, 0x96, 0xfd, 0xce, 0xfd, 0xea, 0x0e, 0xb9, 0x27, - 0xec, 0xa7, 0x63, 0xcb, 0x7e, 0x5e, 0x8c, 0x25, 0x3c, 0x19, 0x70, 0x3c, 0xd7, 0x5f, 0xc0, 0x71, - 0xb4, 0x0d, 0x53, 0xf7, 0xb6, 0x89, 0x77, 0xdb, 0x0b, 0x9d, 0xc8, 0x0d, 0x37, 0x5d, 0xa6, 0x88, - 0xe4, 0xeb, 0xe6, 0x65, 0x69, 0xe5, 0x7c, 0x37, 0x89, 0x70, 0xb0, 0x3f, 0x77, 0xde, 0x28, 0x88, - 0xbb, 0xcc, 0x0f, 0x12, 0xdc, 0x49, 0xb4, 0x33, 0x5e, 0xfb, 0xc0, 0x31, 0xc6, 0x6b, 0xb7, 0xbf, - 0x68, 0xc1, 0xd9, 0xcc, 0x24, 0x91, 0xe8, 0x0a, 0x14, 0x9c, 0x96, 0xcb, 0x65, 0xb9, 0xe2, 0x18, - 0x65, 0x32, 0x83, 0x4a, 0x99, 0x4b, 0x72, 0x15, 0x54, 0x25, 0xaf, 0xce, 0x65, 0x26, 0xaf, 0xee, - 0x99, 0x8b, 0xda, 0xfe, 0x1e, 0x0b, 0x84, 0x57, 0x62, 0x1f, 0x67, 0xf7, 0x9b, 0x32, 0xf7, 0xbf, - 0x91, 0xd3, 0xe5, 0x62, 0xb6, 0x9b, 0xa6, 0xc8, 0xe4, 0xa2, 0x78, 0x25, 0x23, 0x7f, 0x8b, 0x41, - 0xcb, 0xae, 0x83, 0x80, 0x96, 0x08, 0x93, 0x54, 0xf6, 0xee, 0xcd, 0xb3, 0x00, 0x75, 0x86, 0xab, - 0x65, 0x00, 0x57, 0x37, 0x73, 0x49, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x9b, 0x1c, 0x8c, 0xc8, 0x1c, - 0x22, 0x6d, 0xaf, 0x1f, 0x79, 0xc2, 0xa1, 0x92, 0x0a, 0xb2, 0x94, 0xf9, 0x94, 0x70, 0x25, 0x16, - 0xc3, 0xc4, 0x29, 0xf3, 0x25, 0x00, 0xc7, 0x38, 0x74, 0x17, 0x85, 0xed, 0x0d, 0x86, 0x9e, 0xf0, - 0xa1, 0xab, 0xf2, 0x62, 0x2c, 0xe1, 0xe8, 0x53, 0x30, 0xc9, 0xeb, 0x05, 0x7e, 0xcb, 0xd9, 0xe2, - 0x42, 0xf2, 0x41, 0xe5, 0xfc, 0x3e, 0xb9, 0x9a, 0x80, 0x1d, 0xec, 0xcf, 0x9d, 0x4a, 0x96, 0x31, - 0xed, 0x4f, 0x07, 0x15, 0x66, 0x0b, 0xc3, 0x1b, 0xa1, 0xbb, 0xbf, 0xc3, 0x84, 0x26, 0x06, 0x61, - 0x1d, 0xcf, 0xfe, 0x1c, 0xa0, 0xce, 0x6c, 0x2a, 0xe8, 0x75, 0x6e, 0x00, 0xe9, 0x06, 0xa4, 0xde, - 0x4d, 0x1b, 0xa4, 0xbb, 0x78, 0x4b, 0xf7, 0x17, 0x5e, 0x0b, 0xab, 0xfa, 0xf6, 0x5f, 0xca, 0xc3, - 0x64, 0xd2, 0xe1, 0x17, 0x5d, 0x87, 0x21, 0xce, 0x7a, 0x08, 0xf2, 0x5d, 0x8c, 0x0d, 0x34, 0x37, - 0x61, 0x76, 0x08, 0x0b, 0xee, 0x45, 0xd4, 0x47, 0x6f, 0xc1, 0x48, 0xdd, 0xbf, 0xe7, 0xdd, 0x73, - 0x82, 0xfa, 0x42, 0xa5, 0x2c, 0x96, 0x73, 0xea, 0x6b, 0xa9, 0x14, 0xa3, 0xe9, 0xae, 0xc7, 0x4c, - 0xb1, 0x16, 0x83, 0xb0, 0x4e, 0x0e, 0xad, 0xb3, 0xe0, 0xcf, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xea, - 0x66, 0x0d, 0xbf, 0x24, 0x91, 0x34, 0xca, 0x63, 0x22, 0x42, 0x34, 0x07, 0xe0, 0x98, 0x10, 0xfa, - 0x3c, 0x4c, 0x87, 0x19, 0x32, 0xd9, 0xac, 0xe4, 0x5a, 0xdd, 0xc4, 0x94, 0x8b, 0x8f, 0xd0, 0x77, - 0x6c, 0x9a, 0xf4, 0x36, 0xad, 0x19, 0xfb, 0x47, 0x4e, 0x81, 0xb1, 0x89, 0x8d, 0x5c, 0x8b, 0xd6, - 0x11, 0xe5, 0x5a, 0xc4, 0x50, 0x20, 0xcd, 0x56, 0xb4, 0x57, 0x72, 0x83, 0x6e, 0x19, 0x88, 0x97, - 0x05, 0x4e, 0x27, 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x7a, 0x42, 0xcc, 0xfc, 0xd7, 0x31, 0x21, 0xe6, - 0xc0, 0x09, 0x26, 0xc4, 0x5c, 0x83, 0xe1, 0x2d, 0x37, 0xc2, 0xa4, 0xe5, 0x0b, 0xa6, 0x3f, 0x75, - 0x1d, 0x5e, 0xe3, 0x28, 0x9d, 0xa9, 0xd7, 0x04, 0x00, 0x4b, 0x22, 0xe8, 0x75, 0xb5, 0x03, 0x87, - 0xb2, 0xdf, 0xcc, 0x9d, 0x5a, 0xf1, 0xd4, 0x3d, 0x28, 0xd2, 0x5e, 0x0e, 0x3f, 0x6c, 0xda, 0xcb, - 0x15, 0x99, 0xac, 0xb2, 0x90, 0xed, 0xba, 0xc2, 0x72, 0x51, 0xf6, 0x48, 0x51, 0x79, 0x47, 0x4f, - 0xf0, 0x59, 0xcc, 0x3e, 0x09, 0x54, 0xee, 0xce, 0x3e, 0xd3, 0x7a, 0x7e, 0x8f, 0x05, 0xa7, 0x5b, - 0x69, 0xb9, 0x6e, 0x85, 0x02, 0xf9, 0x85, 0xbe, 0xd3, 0xe9, 0x1a, 0x0d, 0x32, 0x41, 0x4d, 0x2a, - 0x1a, 0x4e, 0x6f, 0x8e, 0x0e, 0x74, 0xb0, 0x51, 0x17, 0x8a, 0xcc, 0x4b, 0x19, 0xf9, 0x41, 0xbb, - 0x64, 0x05, 0x5d, 0x4f, 0xc9, 0x45, 0xf9, 0xe1, 0xac, 0x5c, 0x94, 0x7d, 0x67, 0xa0, 0x7c, 0x5d, - 0x65, 0x06, 0x1d, 0xcb, 0x5e, 0x4a, 0x3c, 0xef, 0x67, 0xcf, 0x7c, 0xa0, 0xaf, 0xab, 0x7c, 0xa0, - 0x5d, 0x22, 
0x7b, 0xf2, 0x6c, 0x9f, 0x3d, 0xb3, 0x80, 0x6a, 0x99, 0x3c, 0x27, 0x8e, 0x26, 0x93, - 0xa7, 0x71, 0xd5, 0xf0, 0x64, 0x92, 0x4f, 0xf6, 0xb8, 0x6a, 0x0c, 0xba, 0xdd, 0x2f, 0x1b, 0x9e, - 0xb5, 0x74, 0xea, 0xa1, 0xb2, 0x96, 0xde, 0xd1, 0xb3, 0x80, 0xa2, 0x1e, 0x69, 0x2e, 0x29, 0x52, - 0x9f, 0xb9, 0x3f, 0xef, 0xe8, 0x17, 0xe0, 0x74, 0x36, 0x5d, 0x75, 0xcf, 0x75, 0xd2, 0x4d, 0xbd, - 0x02, 0x3b, 0x72, 0x8a, 0x9e, 0x3a, 0x99, 0x9c, 0xa2, 0xa7, 0x8f, 0x3c, 0xa7, 0xe8, 0x99, 0x13, - 0xc8, 0x29, 0xfa, 0xc8, 0x09, 0xe6, 0x14, 0xbd, 0xc3, 0xac, 0x2e, 0x78, 0x6c, 0x17, 0x11, 0x89, - 0x34, 0x3d, 0xea, 0x65, 0x5a, 0x00, 0x18, 0xfe, 0x71, 0x0a, 0x84, 0x63, 0x52, 0x29, 0xb9, 0x4a, - 0x67, 0x8e, 0x21, 0x57, 0xe9, 0x5a, 0x9c, 0xab, 0xf4, 0x6c, 0xf6, 0x54, 0xa7, 0xd8, 0xe9, 0x67, - 0x64, 0x28, 0xbd, 0xa3, 0x67, 0x16, 0x7d, 0xb4, 0x8b, 0x28, 0x3e, 0x4d, 0xf0, 0xd8, 0x25, 0x9f, - 0xe8, 0x6b, 0x3c, 0x9f, 0xe8, 0xb9, 0xec, 0x93, 0x3c, 0x79, 0xdd, 0x19, 0x59, 0x44, 0x69, 0xbf, - 0x54, 0xcc, 0x3b, 0x16, 0x73, 0x35, 0xa3, 0x5f, 0x2a, 0x68, 0x5e, 0x67, 0xbf, 0x14, 0x08, 0xc7, - 0xa4, 0xec, 0xef, 0xcb, 0xc1, 0x85, 0xee, 0xfb, 0x2d, 0x96, 0xa6, 0x56, 0x62, 0x4d, 0x63, 0x42, - 0x9a, 0xca, 0xdf, 0x6c, 0x31, 0x56, 0xdf, 0xe1, 0xc4, 0xae, 0xc1, 0x94, 0x32, 0xf0, 0x6f, 0xb8, - 0xb5, 0xbd, 0xb5, 0xf8, 0xe5, 0xab, 0x9c, 0xa2, 0xab, 0x49, 0x04, 0xdc, 0x59, 0x07, 0x2d, 0xc0, - 0x84, 0x51, 0x58, 0x2e, 0x89, 0xb7, 0x99, 0x12, 0xdf, 0x56, 0x4d, 0x30, 0x4e, 0xe2, 0xdb, 0x5f, - 0xb6, 0xe0, 0x91, 0x8c, 0x34, 0x60, 0x7d, 0x47, 0xcb, 0xda, 0x84, 0x89, 0x96, 0x59, 0xb5, 0x47, - 0x50, 0x3d, 0x23, 0xd9, 0x98, 0xea, 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x3f, 0xb3, 0xe0, 0x7c, - 0x57, 0x8b, 0x35, 0x84, 0xe1, 0xcc, 0x56, 0x33, 0x74, 0x96, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, - 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x79, 0x38, 0x33, 0xfd, 0xba, 0xb6, 0x5a, 0x5d, 0xe8, 0xc4, 0xc0, - 0x19, 0x35, 0xd1, 0x0a, 0xa0, 0x4e, 0x88, 0x98, 0x61, 0x16, 0xbd, 0xb7, 0x93, 0x1e, 0x4e, 0xa9, - 0x81, 0x5e, 0x84, 0x31, 0x65, 0x09, 0xa7, 0xcd, 0x38, 0x3b, 0xd8, 0xb1, 0x0e, 0xc0, 0x26, 0xde, - 0xe2, 0x95, 0xdf, 0xf8, 0xfd, 0x0b, 0x1f, 0xfa, 0xad, 0xdf, 0xbf, 0xf0, 0xa1, 0xdf, 0xf9, 0xfd, - 0x0b, 0x1f, 0xfa, 0x8e, 0x07, 0x17, 0xac, 0xdf, 0x78, 0x70, 0xc1, 0xfa, 0xad, 0x07, 0x17, 0xac, - 0xdf, 0x79, 0x70, 0xc1, 0xfa, 0xbd, 0x07, 0x17, 0xac, 0x2f, 0xfd, 0xc1, 0x85, 0x0f, 0xbd, 0x99, - 0xdb, 0x7d, 0xe6, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x9f, 0xac, 0x23, 0x24, 0x01, 0x01, - 0x00, + // 13999 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x70, 0x5c, 0xd7, + 0x79, 0x98, 0xef, 0x2e, 0x5e, 0xfb, 0xe1, 0x7d, 0x40, 0x52, 0x20, 0x24, 0x12, 0xd4, 0x95, 0x4d, + 0x51, 0x96, 0x04, 0x9a, 0x7a, 0xd8, 0x8a, 0x64, 0x2b, 0x06, 0xb0, 0x00, 0xb9, 0x22, 0x01, 0xae, + 0xce, 0x82, 0xa4, 0xed, 0xc8, 0x1e, 0x5f, 0xec, 0x1e, 0x00, 0x57, 0xd8, 0xbd, 0x77, 0x75, 0xef, + 0x5d, 0x90, 0x50, 0x9d, 0x69, 0xea, 0x3c, 0x9d, 0x47, 0xc7, 0xd3, 0xc9, 0xf4, 0x91, 0x64, 0x32, + 0x9d, 0x34, 0x9d, 0xc4, 0x75, 0xdb, 0x69, 0x9a, 0x34, 0x49, 0xe3, 0xb4, 0x49, 0x9b, 0x3e, 0xd2, + 0xfe, 0x48, 0xd3, 0x4c, 0x1b, 0x67, 0x26, 0x53, 0x34, 0x61, 0x3a, 0xcd, 0xf8, 0x47, 0x93, 0xb4, + 0x49, 0x7f, 0x14, 0xcd, 0x34, 0x9d, 0xf3, 0xbc, 0xe7, 0xdc, 0xc7, 0xee, 0x82, 0x02, 0x61, 0xd9, + 0xa3, 0x7f, 0xbb, 0xe7, 0xfb, 0xce, 0x77, 0xce, 0x3d, 0xcf, 0xef, 0x7c, 0x4f, 0x78, 0x65, 0xf7, + 0xa5, 0x70, 0xc1, 0xf5, 0x2f, 0xef, 0x76, 0x36, 0x49, 0xe0, 0x91, 0x88, 0x84, 0x97, 0xf7, 0x88, + 0xd7, 0xf0, 0x83, 0xcb, 0x02, 0xe0, 0xb4, 0xdd, 0xcb, 0x75, 0x3f, 0x20, 0x97, 
0xf7, 0xae, 0x5c, + 0xde, 0x26, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0x16, 0xda, 0x81, 0x1f, 0xf9, 0x08, 0x71, 0x9c, 0x05, + 0xa7, 0xed, 0x2e, 0x50, 0x9c, 0x85, 0xbd, 0x2b, 0x73, 0xcf, 0x6e, 0xbb, 0xd1, 0x4e, 0x67, 0x73, + 0xa1, 0xee, 0xb7, 0x2e, 0x6f, 0xfb, 0xdb, 0xfe, 0x65, 0x86, 0xba, 0xd9, 0xd9, 0x62, 0xff, 0xd8, + 0x1f, 0xf6, 0x8b, 0x93, 0x98, 0x7b, 0x21, 0x6e, 0xa6, 0xe5, 0xd4, 0x77, 0x5c, 0x8f, 0x04, 0xfb, + 0x97, 0xdb, 0xbb, 0xdb, 0xac, 0xdd, 0x80, 0x84, 0x7e, 0x27, 0xa8, 0x93, 0x64, 0xc3, 0x5d, 0x6b, + 0x85, 0x97, 0x5b, 0x24, 0x72, 0x32, 0xba, 0x3b, 0x77, 0x39, 0xaf, 0x56, 0xd0, 0xf1, 0x22, 0xb7, + 0x95, 0x6e, 0xe6, 0xc3, 0xbd, 0x2a, 0x84, 0xf5, 0x1d, 0xd2, 0x72, 0x52, 0xf5, 0x9e, 0xcf, 0xab, + 0xd7, 0x89, 0xdc, 0xe6, 0x65, 0xd7, 0x8b, 0xc2, 0x28, 0x48, 0x56, 0xb2, 0xbf, 0x6a, 0xc1, 0x85, + 0xc5, 0x3b, 0xb5, 0x95, 0xa6, 0x13, 0x46, 0x6e, 0x7d, 0xa9, 0xe9, 0xd7, 0x77, 0x6b, 0x91, 0x1f, + 0x90, 0xdb, 0x7e, 0xb3, 0xd3, 0x22, 0x35, 0x36, 0x10, 0xe8, 0x19, 0x18, 0xd9, 0x63, 0xff, 0x2b, + 0xe5, 0x59, 0xeb, 0x82, 0x75, 0xa9, 0xb4, 0x34, 0xf5, 0xeb, 0x07, 0xf3, 0xef, 0xbb, 0x7f, 0x30, + 0x3f, 0x72, 0x5b, 0x94, 0x63, 0x85, 0x81, 0x2e, 0xc2, 0xd0, 0x56, 0xb8, 0xb1, 0xdf, 0x26, 0xb3, + 0x05, 0x86, 0x3b, 0x21, 0x70, 0x87, 0x56, 0x6b, 0xb4, 0x14, 0x0b, 0x28, 0xba, 0x0c, 0xa5, 0xb6, + 0x13, 0x44, 0x6e, 0xe4, 0xfa, 0xde, 0x6c, 0xf1, 0x82, 0x75, 0x69, 0x70, 0x69, 0x5a, 0xa0, 0x96, + 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xda, 0x8d, 0x80, 0x38, 0x8d, 0x9b, 0x5e, 0x73, 0x7f, 0x76, 0xe0, + 0x82, 0x75, 0x69, 0x24, 0xee, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x52, 0x80, 0x91, 0xc5, + 0xad, 0x2d, 0xd7, 0x73, 0xa3, 0x7d, 0x74, 0x1b, 0xc6, 0x3c, 0xbf, 0x41, 0xe4, 0x7f, 0xf6, 0x15, + 0xa3, 0xcf, 0x5d, 0x58, 0x48, 0x2f, 0xa5, 0x85, 0x75, 0x0d, 0x6f, 0x69, 0xea, 0xfe, 0xc1, 0xfc, + 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x28, 0xb2, 0x05, 0x46, 0x76, 0x3e, + 0x8b, 0x6c, 0x35, 0x46, 0x5b, 0x9a, 0xbc, 0x7f, 0x30, 0x3f, 0xaa, 0x15, 0x60, 0x9d, 0x08, 0xda, + 0x84, 0x49, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x91, 0x47, 0x57, 0x43, 0x5d, + 0x9a, 0xb9, 0x7f, 0x30, 0x3f, 0x99, 0x28, 0xc4, 0x49, 0x82, 0xf6, 0xdb, 0x30, 0xb1, 0x18, 0x45, + 0x4e, 0x7d, 0x87, 0x34, 0xf8, 0x0c, 0xa2, 0x17, 0x60, 0xc0, 0x73, 0x5a, 0x44, 0xcc, 0xef, 0x05, + 0x31, 0xb0, 0x03, 0xeb, 0x4e, 0x8b, 0x1c, 0x1e, 0xcc, 0x4f, 0xdd, 0xf2, 0xdc, 0xb7, 0x3a, 0x62, + 0x55, 0xd0, 0x32, 0xcc, 0xb0, 0xd1, 0x73, 0x00, 0x0d, 0xb2, 0xe7, 0xd6, 0x49, 0xd5, 0x89, 0x76, + 0xc4, 0x7c, 0x23, 0x51, 0x17, 0xca, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x1e, 0x94, 0x16, 0xf7, 0x7c, + 0xb7, 0x51, 0xf5, 0x1b, 0x21, 0xda, 0x85, 0xc9, 0x76, 0x40, 0xb6, 0x48, 0xa0, 0x8a, 0x66, 0xad, + 0x0b, 0xc5, 0x4b, 0xa3, 0xcf, 0x5d, 0xca, 0xfc, 0x58, 0x13, 0x75, 0xc5, 0x8b, 0x82, 0xfd, 0xa5, + 0x47, 0x44, 0x7b, 0x93, 0x09, 0x28, 0x4e, 0x52, 0xb6, 0xff, 0x55, 0x01, 0x4e, 0x2f, 0xbe, 0xdd, + 0x09, 0x48, 0xd9, 0x0d, 0x77, 0x93, 0x2b, 0xbc, 0xe1, 0x86, 0xbb, 0xeb, 0xf1, 0x08, 0xa8, 0xa5, + 0x55, 0x16, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x98, 0xfe, 0xbe, 0x85, 0x2b, 0xe2, 0x93, 0x67, + 0x04, 0xf2, 0x68, 0xd9, 0x89, 0x9c, 0x32, 0x07, 0x61, 0x89, 0x83, 0xd6, 0x60, 0xb4, 0xce, 0x36, + 0xe4, 0xf6, 0x9a, 0xdf, 0x20, 0x6c, 0x32, 0x4b, 0x4b, 0x4f, 0x53, 0xf4, 0xe5, 0xb8, 0xf8, 0xf0, + 0x60, 0x7e, 0x96, 0xf7, 0x4d, 0x90, 0xd0, 0x60, 0x58, 0xaf, 0x8f, 0x6c, 0xb5, 0xbf, 0x06, 0x18, + 0x25, 0xc8, 0xd8, 0x5b, 0x97, 0xb4, 0xad, 0x32, 0xc8, 0xb6, 0xca, 0x58, 0xf6, 0x36, 0x41, 0x57, + 0x60, 0x60, 0xd7, 0xf5, 0x1a, 0xb3, 0x43, 0x8c, 0xd6, 0x39, 0x3a, 0xe7, 0xd7, 0x5d, 0xaf, 0x71, + 0x78, 
0x30, 0x3f, 0x6d, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0xa9, 0x05, 0xf3, 0x0c, 0xb6, + 0xea, 0x36, 0x49, 0x95, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x0e, 0x20, 0x24, + 0xf5, 0x80, 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x53, 0x10, 0xac, 0x61, 0xd1, 0x03, 0x21, 0xdc, + 0x71, 0x02, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x26, 0x01, 0x38, 0xc6, 0x31, 0x0e, 0x84, + 0x62, 0xaf, 0x03, 0x01, 0x7d, 0x0c, 0x26, 0xe3, 0xc6, 0xc2, 0xb6, 0x53, 0x97, 0x03, 0xc8, 0xb6, + 0x4c, 0xcd, 0x04, 0xe1, 0x24, 0xae, 0xfd, 0xf7, 0x2c, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xcb, 0xbf, + 0xd5, 0xfe, 0x45, 0x0b, 0x86, 0x97, 0x5c, 0xaf, 0xe1, 0x7a, 0xdb, 0xe8, 0xb3, 0x30, 0x42, 0xef, + 0xa6, 0x86, 0x13, 0x39, 0xe2, 0xdc, 0xfb, 0x90, 0xb6, 0xb7, 0xd4, 0x55, 0xb1, 0xd0, 0xde, 0xdd, + 0xa6, 0x05, 0xe1, 0x02, 0xc5, 0xa6, 0xbb, 0xed, 0xe6, 0xe6, 0x9b, 0xa4, 0x1e, 0xad, 0x91, 0xc8, + 0x89, 0x3f, 0x27, 0x2e, 0xc3, 0x8a, 0x2a, 0xba, 0x0e, 0x43, 0x91, 0x13, 0x6c, 0x93, 0x48, 0x1c, + 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x5e, 0x9d, 0xc4, 0xd7, 0xc2, 0x06, 0xab, + 0x8a, 0x05, 0x09, 0xfb, 0x87, 0x86, 0xe1, 0xec, 0x72, 0xad, 0x92, 0xb3, 0xae, 0x2e, 0xc2, 0x50, + 0x23, 0x70, 0xf7, 0x48, 0x20, 0xc6, 0x59, 0x51, 0x29, 0xb3, 0x52, 0x2c, 0xa0, 0xe8, 0x25, 0x18, + 0xe3, 0x17, 0xd2, 0x35, 0xc7, 0x6b, 0x34, 0xe5, 0x10, 0x9f, 0x12, 0xd8, 0x63, 0xb7, 0x35, 0x18, + 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x26, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x05, 0x0b, 0xa6, 0x78, + 0x33, 0x8b, 0x51, 0x14, 0xb8, 0x9b, 0x9d, 0x88, 0x84, 0xb3, 0x83, 0xec, 0xa4, 0x5b, 0xce, 0x1a, + 0xad, 0xdc, 0x11, 0x58, 0xb8, 0x9d, 0xa0, 0xc2, 0x0f, 0xc1, 0x59, 0xd1, 0xee, 0x54, 0x12, 0x8c, + 0x53, 0xcd, 0xa2, 0xef, 0xb4, 0x60, 0xae, 0xee, 0x7b, 0x51, 0xe0, 0x37, 0x9b, 0x24, 0xa8, 0x76, + 0x36, 0x9b, 0x6e, 0xb8, 0xc3, 0xd7, 0x29, 0x26, 0x5b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, + 0xe6, 0xf0, 0xfc, 0xfd, 0x83, 0xf9, 0xb9, 0xe5, 0x5c, 0x52, 0xb8, 0x4b, 0x33, 0x68, 0x17, 0x10, + 0xbd, 0x4a, 0x6b, 0x91, 0xb3, 0x4d, 0xe2, 0xc6, 0x87, 0xfb, 0x6f, 0xfc, 0xcc, 0xfd, 0x83, 0x79, + 0xb4, 0x9e, 0x22, 0x81, 0x33, 0xc8, 0xa2, 0xb7, 0xe0, 0x14, 0x2d, 0x4d, 0x7d, 0xeb, 0x48, 0xff, + 0xcd, 0xcd, 0xde, 0x3f, 0x98, 0x3f, 0xb5, 0x9e, 0x41, 0x04, 0x67, 0x92, 0x46, 0xdf, 0x61, 0xc1, + 0xd9, 0xf8, 0xf3, 0x57, 0xee, 0xb5, 0x1d, 0xaf, 0x11, 0x37, 0x5c, 0xea, 0xbf, 0x61, 0x7a, 0x26, + 0x9f, 0x5d, 0xce, 0xa3, 0x84, 0xf3, 0x1b, 0x99, 0x5b, 0x86, 0xd3, 0x99, 0xab, 0x05, 0x4d, 0x41, + 0x71, 0x97, 0x70, 0x2e, 0xa8, 0x84, 0xe9, 0x4f, 0x74, 0x0a, 0x06, 0xf7, 0x9c, 0x66, 0x47, 0x6c, + 0x14, 0xcc, 0xff, 0xbc, 0x5c, 0x78, 0xc9, 0xb2, 0xff, 0x75, 0x11, 0x26, 0x97, 0x6b, 0x95, 0x07, + 0xda, 0x85, 0xfa, 0x35, 0x54, 0xe8, 0x7a, 0x0d, 0xc5, 0x97, 0x5a, 0x31, 0xf7, 0x52, 0xfb, 0xcb, + 0x19, 0x5b, 0x68, 0x80, 0x6d, 0xa1, 0x6f, 0xc9, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0xf6, 0x72, 0x56, + 0xd1, 0x20, 0x9b, 0xcc, 0x4c, 0x8e, 0xe5, 0x86, 0x5f, 0x77, 0x9a, 0xc9, 0xa3, 0xef, 0x88, 0x4b, + 0xe9, 0x78, 0xe6, 0xb1, 0x0e, 0x63, 0xcb, 0x4e, 0xdb, 0xd9, 0x74, 0x9b, 0x6e, 0xe4, 0x92, 0x10, + 0x3d, 0x09, 0x45, 0xa7, 0xd1, 0x60, 0xdc, 0x56, 0x69, 0xe9, 0xf4, 0xfd, 0x83, 0xf9, 0xe2, 0x62, + 0x83, 0x5e, 0xfb, 0xa0, 0xb0, 0xf6, 0x31, 0xc5, 0x40, 0x1f, 0x84, 0x81, 0x46, 0xe0, 0xb7, 0x67, + 0x0b, 0x0c, 0x93, 0xee, 0xba, 0x81, 0x72, 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xd5, 0x02, + 0x3c, 0xb6, 0x4c, 0xda, 0x3b, 0xab, 0xb5, 0x9c, 0xf3, 0xfb, 0x12, 0x8c, 0xb4, 0x7c, 0xcf, 0x8d, + 0xfc, 0x20, 0x14, 0x4d, 0xb3, 0x15, 0xb1, 0x26, 0xca, 0xb0, 0x82, 0xa2, 0x0b, 0x30, 0xd0, 0x8e, + 0x99, 0xca, 0x31, 0xc9, 0x90, 
0x32, 0x76, 0x92, 0x41, 0x28, 0x46, 0x27, 0x24, 0x81, 0x58, 0x31, + 0x0a, 0xe3, 0x56, 0x48, 0x02, 0xcc, 0x20, 0xf1, 0xcd, 0x4c, 0xef, 0x6c, 0x71, 0x42, 0x27, 0x6e, + 0x66, 0x0a, 0xc1, 0x1a, 0x16, 0xaa, 0x42, 0x29, 0x4c, 0xcc, 0x6c, 0x5f, 0xdb, 0x74, 0x9c, 0x5d, + 0xdd, 0x6a, 0x26, 0x63, 0x22, 0xc6, 0x8d, 0x32, 0xd4, 0xf3, 0xea, 0xfe, 0x4a, 0x01, 0x10, 0x1f, + 0xc2, 0x6f, 0xb0, 0x81, 0xbb, 0x95, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4, 0x71, 0x8d, 0xde, 0x9f, 0x59, + 0xf0, 0xd8, 0xb2, 0xeb, 0x35, 0x48, 0x90, 0xb3, 0x00, 0x1f, 0xce, 0x5b, 0xf6, 0x68, 0x4c, 0x83, + 0xb1, 0xc4, 0x06, 0x8e, 0x61, 0x89, 0xd9, 0x7f, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xae, 0xfb, 0xd8, + 0x5b, 0xe9, 0x8f, 0x3d, 0x86, 0x65, 0x61, 0xdf, 0x80, 0x89, 0xe5, 0xa6, 0x4b, 0xbc, 0xa8, 0x52, + 0x5d, 0xf6, 0xbd, 0x2d, 0x77, 0x1b, 0xbd, 0x0c, 0x13, 0x91, 0xdb, 0x22, 0x7e, 0x27, 0xaa, 0x91, + 0xba, 0xef, 0xb1, 0x97, 0xa4, 0x75, 0x69, 0x70, 0x09, 0xdd, 0x3f, 0x98, 0x9f, 0xd8, 0x30, 0x20, + 0x38, 0x81, 0x69, 0xff, 0x2e, 0x1d, 0x3f, 0xbf, 0xd5, 0xf6, 0x3d, 0xe2, 0x45, 0xcb, 0xbe, 0xd7, + 0xe0, 0x12, 0x87, 0x97, 0x61, 0x20, 0xa2, 0xe3, 0xc1, 0xc7, 0xee, 0xa2, 0xdc, 0x28, 0x74, 0x14, + 0x0e, 0x0f, 0xe6, 0xcf, 0xa4, 0x6b, 0xb0, 0x71, 0x62, 0x75, 0xd0, 0xb7, 0xc0, 0x50, 0x18, 0x39, + 0x51, 0x27, 0x14, 0xa3, 0xf9, 0xb8, 0x1c, 0xcd, 0x1a, 0x2b, 0x3d, 0x3c, 0x98, 0x9f, 0x54, 0xd5, + 0x78, 0x11, 0x16, 0x15, 0xd0, 0x53, 0x30, 0xdc, 0x22, 0x61, 0xe8, 0x6c, 0xcb, 0xdb, 0x70, 0x52, + 0xd4, 0x1d, 0x5e, 0xe3, 0xc5, 0x58, 0xc2, 0xd1, 0x13, 0x30, 0x48, 0x82, 0xc0, 0x0f, 0xc4, 0x1e, + 0x1d, 0x17, 0x88, 0x83, 0x2b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x98, 0x54, 0x7d, 0xe5, + 0x6d, 0x9d, 0xc0, 0xab, 0xe0, 0x53, 0x00, 0x75, 0xf9, 0x81, 0x21, 0xbb, 0x3d, 0x46, 0x9f, 0xbb, + 0x98, 0x79, 0x51, 0xa7, 0x86, 0x31, 0xa6, 0xac, 0x8a, 0x42, 0xac, 0x51, 0xb3, 0xff, 0x99, 0x05, + 0x33, 0x89, 0x2f, 0xba, 0xe1, 0x86, 0x11, 0x7a, 0x23, 0xf5, 0x55, 0x0b, 0xfd, 0x7d, 0x15, 0xad, + 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0x74, 0x23, 0xd2, 0x92, 0x1f, + 0xf3, 0x44, 0xd7, 0x8f, 0xe1, 0xbd, 0x8a, 0x67, 0xa4, 0x42, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, 0xd5, + 0x22, 0x94, 0xf8, 0xb2, 0x5d, 0x73, 0xda, 0x27, 0x30, 0x17, 0x4f, 0x43, 0xc9, 0x6d, 0xb5, 0x3a, + 0x91, 0xb3, 0x29, 0x8e, 0xf3, 0x11, 0xbe, 0xb5, 0x2a, 0xb2, 0x10, 0xc7, 0x70, 0x54, 0x81, 0x01, + 0xd6, 0x15, 0xfe, 0x95, 0x4f, 0x66, 0x7f, 0xa5, 0xe8, 0xfb, 0x42, 0xd9, 0x89, 0x1c, 0xce, 0x49, + 0xa9, 0x7b, 0x84, 0x16, 0x61, 0x46, 0x02, 0x39, 0x00, 0x9b, 0xae, 0xe7, 0x04, 0xfb, 0xb4, 0x6c, + 0xb6, 0xc8, 0x08, 0x3e, 0xdb, 0x9d, 0xe0, 0x92, 0xc2, 0xe7, 0x64, 0xd5, 0x87, 0xc5, 0x00, 0xac, + 0x11, 0x9d, 0xfb, 0x08, 0x94, 0x14, 0xf2, 0x51, 0x18, 0xa2, 0xb9, 0x8f, 0xc1, 0x64, 0xa2, 0xad, + 0x5e, 0xd5, 0xc7, 0x74, 0x7e, 0xea, 0x97, 0xd8, 0x91, 0x21, 0x7a, 0xbd, 0xe2, 0xed, 0x89, 0x23, + 0xf7, 0x6d, 0x38, 0xd5, 0xcc, 0x38, 0xc9, 0xc4, 0xbc, 0xf6, 0x7f, 0xf2, 0x3d, 0x26, 0x3e, 0xfb, + 0x54, 0x16, 0x14, 0x67, 0xb6, 0x41, 0x79, 0x04, 0xbf, 0x4d, 0x37, 0x88, 0xd3, 0xd4, 0xd9, 0xed, + 0x9b, 0xa2, 0x0c, 0x2b, 0x28, 0x3d, 0xef, 0x4e, 0xa9, 0xce, 0x5f, 0x27, 0xfb, 0x35, 0xd2, 0x24, + 0xf5, 0xc8, 0x0f, 0xbe, 0xae, 0xdd, 0x3f, 0xc7, 0x47, 0x9f, 0x1f, 0x97, 0xa3, 0x82, 0x40, 0xf1, + 0x3a, 0xd9, 0xe7, 0x53, 0xa1, 0x7f, 0x5d, 0xb1, 0xeb, 0xd7, 0xfd, 0x8c, 0x05, 0xe3, 0xea, 0xeb, + 0x4e, 0xe0, 0x5c, 0x58, 0x32, 0xcf, 0x85, 0x73, 0x5d, 0x17, 0x78, 0xce, 0x89, 0xf0, 0x95, 0x02, + 0x9c, 0x55, 0x38, 0xf4, 0x6d, 0xc0, 0xff, 0x88, 0x55, 0x75, 0x19, 0x4a, 0x9e, 0x92, 0x5a, 0x59, + 0xa6, 0xb8, 0x28, 0x96, 0x59, 0xc5, 0x38, 0x94, 0xc5, 
0xf3, 0x62, 0xd1, 0xd2, 0x98, 0x2e, 0xce, + 0x15, 0xa2, 0xdb, 0x25, 0x28, 0x76, 0xdc, 0x86, 0xb8, 0x60, 0x3e, 0x24, 0x47, 0xfb, 0x56, 0xa5, + 0x7c, 0x78, 0x30, 0xff, 0x78, 0x9e, 0x2a, 0x81, 0xde, 0x6c, 0xe1, 0xc2, 0xad, 0x4a, 0x19, 0xd3, + 0xca, 0x68, 0x11, 0x26, 0xa5, 0xb6, 0xe4, 0x36, 0x65, 0xb7, 0x7c, 0x4f, 0xdc, 0x43, 0x4a, 0x26, + 0x8b, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, 0x32, 0x4c, 0xed, 0x76, 0x36, 0x49, 0x93, 0x44, 0xfc, 0x83, + 0xaf, 0x13, 0x2e, 0xb1, 0x2c, 0xc5, 0x2f, 0xb3, 0xeb, 0x09, 0x38, 0x4e, 0xd5, 0xb0, 0xff, 0x82, + 0xdd, 0x07, 0x62, 0xf4, 0xaa, 0x81, 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0xf5, 0x5c, 0xce, 0xfd, 0xac, + 0x8a, 0xeb, 0x64, 0x7f, 0xc3, 0xa7, 0x9c, 0x79, 0xf6, 0xaa, 0x30, 0xd6, 0xfc, 0x40, 0xd7, 0x35, + 0xff, 0x73, 0x05, 0x38, 0xad, 0x46, 0xc0, 0x60, 0x02, 0xbf, 0xd1, 0xc7, 0xe0, 0x0a, 0x8c, 0x36, + 0xc8, 0x96, 0xd3, 0x69, 0x46, 0x4a, 0x7c, 0x3e, 0xc8, 0x55, 0x28, 0xe5, 0xb8, 0x18, 0xeb, 0x38, + 0x47, 0x18, 0xb6, 0xff, 0x3d, 0xca, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, + 0xd7, 0x3c, 0x01, 0x83, 0x6e, 0x8b, 0x32, 0x66, 0x05, 0x93, 0xdf, 0xaa, 0xd0, 0x42, 0xcc, 0x61, + 0xe8, 0x03, 0x30, 0x5c, 0xf7, 0x5b, 0x2d, 0xc7, 0x6b, 0xb0, 0x2b, 0xaf, 0xb4, 0x34, 0x4a, 0x79, + 0xb7, 0x65, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x03, 0x4e, 0xb0, 0xcd, 0x65, 0x18, 0xa5, 0xa5, + 0x11, 0xda, 0xd2, 0x62, 0xb0, 0x1d, 0x62, 0x56, 0x4a, 0x9f, 0x60, 0x77, 0xfd, 0x60, 0xd7, 0xf5, + 0xb6, 0xcb, 0x6e, 0x20, 0xb6, 0x84, 0xba, 0x0b, 0xef, 0x28, 0x08, 0xd6, 0xb0, 0xd0, 0x2a, 0x0c, + 0xb6, 0xfd, 0x20, 0x0a, 0x67, 0x87, 0xd8, 0x70, 0x3f, 0x9e, 0x73, 0x10, 0xf1, 0xaf, 0xad, 0xfa, + 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1d, 0xdd, 0x80, 0x61, 0xe2, 0xed, 0xad, 0x06, + 0x7e, 0x6b, 0x76, 0x26, 0x9f, 0xd2, 0x0a, 0x47, 0xe1, 0xcb, 0x2c, 0xe6, 0x51, 0x45, 0x31, 0x96, + 0x24, 0xd0, 0xb7, 0x40, 0x91, 0x78, 0x7b, 0xb3, 0xc3, 0x8c, 0xd2, 0x5c, 0x0e, 0xa5, 0xdb, 0x4e, + 0x10, 0x9f, 0xf9, 0x2b, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0x27, 0xa1, 0x24, 0x0f, 0x8c, 0x50, 0x08, + 0xeb, 0x32, 0x17, 0xac, 0x3c, 0x66, 0x30, 0x79, 0xab, 0xe3, 0x06, 0xa4, 0x45, 0xbc, 0x28, 0x8c, + 0x4f, 0x48, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x52, 0x4a, 0x88, 0xd7, 0xfc, 0x8e, 0x17, 0x85, + 0xb3, 0x25, 0xd6, 0xbd, 0x4c, 0xdd, 0xdd, 0xed, 0x18, 0x2f, 0x29, 0x42, 0xe6, 0x95, 0xb1, 0x41, + 0x0a, 0x7d, 0x1a, 0xc6, 0xf9, 0x7f, 0xae, 0x01, 0x0b, 0x67, 0x4f, 0x33, 0xda, 0x17, 0xf2, 0x69, + 0x73, 0xc4, 0xa5, 0xd3, 0x82, 0xf8, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x1a, 0xc2, 0x30, 0xde, 0x74, + 0xf7, 0x88, 0x47, 0xc2, 0xb0, 0x1a, 0xf8, 0x9b, 0x64, 0x16, 0xd8, 0xc0, 0x9c, 0xcd, 0xd6, 0x98, + 0xf9, 0x9b, 0x64, 0x69, 0x9a, 0xd2, 0xbc, 0xa1, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x0b, 0x26, 0xe8, + 0x8b, 0xcd, 0x8d, 0x89, 0x8e, 0xf6, 0x22, 0xca, 0xde, 0x55, 0xd8, 0xa8, 0x84, 0x13, 0x44, 0xd0, + 0x4d, 0x18, 0x0b, 0x23, 0x27, 0x88, 0x3a, 0x6d, 0x4e, 0xf4, 0x4c, 0x2f, 0xa2, 0x4c, 0xe1, 0x5a, + 0xd3, 0xaa, 0x60, 0x83, 0x00, 0x7a, 0x0d, 0x4a, 0x4d, 0x77, 0x8b, 0xd4, 0xf7, 0xeb, 0x4d, 0x32, + 0x3b, 0xc6, 0xa8, 0x65, 0x1e, 0x2a, 0x37, 0x24, 0x12, 0xe7, 0x73, 0xd5, 0x5f, 0x1c, 0x57, 0x47, + 0xb7, 0xe1, 0x4c, 0x44, 0x82, 0x96, 0xeb, 0x39, 0xf4, 0x30, 0x10, 0x4f, 0x2b, 0xa6, 0xc8, 0x1c, + 0x67, 0xbb, 0xed, 0xbc, 0x98, 0x8d, 0x33, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x07, 0xb3, + 0x19, 0x10, 0xbf, 0xe9, 0xd6, 0xf7, 0x67, 0x4f, 0x31, 0xca, 0x1f, 0x15, 0x94, 0x67, 0x37, 0x72, + 0xf0, 0x0e, 0xbb, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x13, 0x26, 0xd9, 0x09, 0x54, 0xed, 0x34, 0x9b, + 0xa2, 0xc1, 0x09, 0xd6, 0xe0, 0x07, 0xe4, 0x7d, 0x5c, 0x31, 0xc1, 0x87, 0x07, 
0xf3, 0x10, 0xff, + 0xc3, 0xc9, 0xda, 0x68, 0x93, 0xe9, 0xcc, 0x3a, 0x81, 0x1b, 0xed, 0xd3, 0x73, 0x83, 0xdc, 0x8b, + 0x66, 0x27, 0xbb, 0xca, 0x2b, 0x74, 0x54, 0xa5, 0x58, 0xd3, 0x0b, 0x71, 0x92, 0x20, 0x3d, 0x52, + 0xc3, 0xa8, 0xe1, 0x7a, 0xb3, 0x53, 0xfc, 0x5d, 0x22, 0x4f, 0xa4, 0x1a, 0x2d, 0xc4, 0x1c, 0xc6, + 0xf4, 0x65, 0xf4, 0xc7, 0x4d, 0x7a, 0x73, 0x4d, 0x33, 0xc4, 0x58, 0x5f, 0x26, 0x01, 0x38, 0xc6, + 0xa1, 0xcc, 0x64, 0x14, 0xed, 0xcf, 0x22, 0x86, 0xaa, 0x0e, 0x96, 0x8d, 0x8d, 0x4f, 0x62, 0x5a, + 0x6e, 0x6f, 0xc2, 0x84, 0x3a, 0x08, 0xd9, 0x98, 0xa0, 0x79, 0x18, 0x64, 0xec, 0x93, 0x90, 0xae, + 0x95, 0x68, 0x17, 0x18, 0x6b, 0x85, 0x79, 0x39, 0xeb, 0x82, 0xfb, 0x36, 0x59, 0xda, 0x8f, 0x08, + 0x7f, 0xd3, 0x17, 0xb5, 0x2e, 0x48, 0x00, 0x8e, 0x71, 0xec, 0xff, 0xc7, 0xd9, 0xd0, 0xf8, 0xb4, + 0xed, 0xe3, 0x7e, 0x79, 0x06, 0x46, 0x76, 0xfc, 0x30, 0xa2, 0xd8, 0xac, 0x8d, 0xc1, 0x98, 0xf1, + 0xbc, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc0, 0x78, 0x5d, 0x6f, 0x40, 0x5c, 0x8e, 0xea, 0x18, + 0x31, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x09, 0x46, 0x98, 0x0d, 0x48, 0xdd, 0x6f, 0x0a, 0xae, 0x4d, + 0xde, 0xf0, 0x23, 0x55, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x84, 0x21, 0xda, 0x85, + 0x4a, 0x55, 0x5c, 0x4b, 0x4a, 0x50, 0x74, 0x8d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x5a, 0x41, 0x1b, + 0x65, 0xfa, 0x1e, 0x26, 0xa8, 0x0a, 0xc3, 0x77, 0x1d, 0x37, 0x72, 0xbd, 0x6d, 0xc1, 0x7f, 0x3c, + 0xd5, 0xf5, 0x8e, 0x62, 0x95, 0xee, 0xf0, 0x0a, 0xfc, 0x16, 0x15, 0x7f, 0xb0, 0x24, 0x43, 0x29, + 0x06, 0x1d, 0xcf, 0xa3, 0x14, 0x0b, 0xfd, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, + 0x06, 0xbd, 0x01, 0x20, 0x77, 0x18, 0x69, 0x08, 0xdb, 0x8b, 0x67, 0x7a, 0x13, 0xdd, 0x50, 0x75, + 0x96, 0x26, 0xe8, 0x1d, 0x1d, 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0x62, 0x7c, 0x5a, 0xba, 0x33, 0xe8, + 0xdb, 0xe8, 0x12, 0x77, 0x82, 0x88, 0x34, 0x16, 0x23, 0x31, 0x38, 0x1f, 0xec, 0xef, 0x91, 0xb2, + 0xe1, 0xb6, 0x88, 0xbe, 0x1d, 0x04, 0x11, 0x1c, 0xd3, 0xb3, 0x7f, 0xa1, 0x08, 0xb3, 0x79, 0xdd, + 0xa5, 0x8b, 0x8e, 0xdc, 0x73, 0xa3, 0x65, 0xca, 0x5e, 0x59, 0xe6, 0xa2, 0x5b, 0x11, 0xe5, 0x58, + 0x61, 0xd0, 0xd9, 0x0f, 0xdd, 0x6d, 0xf9, 0xc6, 0x1c, 0x8c, 0x67, 0xbf, 0xc6, 0x4a, 0xb1, 0x80, + 0x52, 0xbc, 0x80, 0x38, 0xa1, 0x30, 0xee, 0xd1, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x69, + 0xd7, 0x40, 0x0f, 0x69, 0x97, 0x31, 0x44, 0x83, 0xc7, 0x3b, 0x44, 0xe8, 0x33, 0x00, 0x5b, 0xae, + 0xe7, 0x86, 0x3b, 0x8c, 0xfa, 0xd0, 0x91, 0xa9, 0x2b, 0xe6, 0x6c, 0x55, 0x51, 0xc1, 0x1a, 0x45, + 0xf4, 0x22, 0x8c, 0xaa, 0x0d, 0x58, 0x29, 0x33, 0x4d, 0xa7, 0x66, 0x39, 0x12, 0x9f, 0x46, 0x65, + 0xac, 0xe3, 0xd9, 0x6f, 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0xfd, 0x8e, 0x6f, 0xa1, + 0xfb, 0xf8, 0xda, 0x5f, 0x2b, 0xc2, 0xa4, 0xd1, 0x58, 0x27, 0xec, 0xe3, 0xcc, 0xba, 0x4a, 0x0f, + 0x70, 0x27, 0x22, 0x62, 0xff, 0xd9, 0xbd, 0xb7, 0x8a, 0x7e, 0xc8, 0xd3, 0x1d, 0xc0, 0xeb, 0xa3, + 0xcf, 0x40, 0xa9, 0xe9, 0x84, 0x4c, 0x72, 0x46, 0xc4, 0xbe, 0xeb, 0x87, 0x58, 0xfc, 0x30, 0x71, + 0xc2, 0x48, 0xbb, 0x35, 0x39, 0xed, 0x98, 0x24, 0xbd, 0x69, 0x28, 0x7f, 0x22, 0xad, 0xc7, 0x54, + 0x27, 0x28, 0x13, 0xb3, 0x8f, 0x39, 0x0c, 0xbd, 0x04, 0x63, 0x01, 0x61, 0xab, 0x62, 0x99, 0x72, + 0x73, 0x6c, 0x99, 0x0d, 0xc6, 0x6c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 0xdf, 0x06, 0x43, 0x5d, + 0xde, 0x06, 0x4f, 0xc1, 0x30, 0xfb, 0xa1, 0x56, 0x80, 0x9a, 0x8d, 0x0a, 0x2f, 0xc6, 0x12, 0x9e, + 0x5c, 0x30, 0x23, 0xfd, 0x2d, 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x69, 0x99, 0x47, 0xf8, 0x29, + 0x27, 0x96, 0x3c, 0x96, 0x30, 0xfb, 0x83, 0x30, 0x51, 0x76, 0x48, 0xcb, 0xf7, 0x56, 0xbc, 0x46, + 0xdb, 
0x77, 0xbd, 0x08, 0xcd, 0xc2, 0x00, 0xbb, 0x44, 0xf8, 0x11, 0x30, 0x40, 0x1b, 0xc2, 0x03, + 0xf4, 0x41, 0x60, 0x6f, 0xc3, 0xe9, 0xb2, 0x7f, 0xd7, 0xbb, 0xeb, 0x04, 0x8d, 0xc5, 0x6a, 0x45, + 0x7b, 0x5f, 0xaf, 0xcb, 0xf7, 0x1d, 0x37, 0xda, 0xca, 0x3c, 0x7a, 0xb5, 0x9a, 0x9c, 0xad, 0x5d, + 0x75, 0x9b, 0x24, 0x47, 0x0a, 0xf2, 0x37, 0x0a, 0x46, 0x4b, 0x31, 0xbe, 0xd2, 0x6a, 0x59, 0xb9, + 0x5a, 0xad, 0xd7, 0x61, 0x64, 0xcb, 0x25, 0xcd, 0x06, 0x26, 0x5b, 0x62, 0x25, 0x3e, 0x99, 0x6f, + 0x87, 0xb2, 0x4a, 0x31, 0xa5, 0xd4, 0x8b, 0xbf, 0x0e, 0x57, 0x45, 0x65, 0xac, 0xc8, 0xa0, 0x5d, + 0x98, 0x92, 0x0f, 0x06, 0x09, 0x15, 0xeb, 0xf2, 0xa9, 0x6e, 0xaf, 0x10, 0x93, 0xf8, 0xa9, 0xfb, + 0x07, 0xf3, 0x53, 0x38, 0x41, 0x06, 0xa7, 0x08, 0xd3, 0xe7, 0x60, 0x8b, 0x9e, 0xc0, 0x03, 0x6c, + 0xf8, 0xd9, 0x73, 0x90, 0xbd, 0x6c, 0x59, 0xa9, 0xfd, 0x63, 0x16, 0x3c, 0x92, 0x1a, 0x19, 0xf1, + 0xc2, 0x3f, 0xe6, 0x59, 0x48, 0xbe, 0xb8, 0x0b, 0xbd, 0x5f, 0xdc, 0xf6, 0xdf, 0xb7, 0xe0, 0xd4, + 0x4a, 0xab, 0x1d, 0xed, 0x97, 0x5d, 0x53, 0x05, 0xf5, 0x11, 0x18, 0x6a, 0x91, 0x86, 0xdb, 0x69, + 0x89, 0x99, 0x9b, 0x97, 0xa7, 0xd4, 0x1a, 0x2b, 0x3d, 0x3c, 0x98, 0x1f, 0xaf, 0x45, 0x7e, 0xe0, + 0x6c, 0x13, 0x5e, 0x80, 0x05, 0x3a, 0x3b, 0xeb, 0xdd, 0xb7, 0xc9, 0x0d, 0xb7, 0xe5, 0x4a, 0xbb, + 0xa2, 0xae, 0x32, 0xbb, 0x05, 0x39, 0xa0, 0x0b, 0xaf, 0x77, 0x1c, 0x2f, 0x72, 0xa3, 0x7d, 0xa1, + 0x3d, 0x92, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0xaa, 0x05, 0x93, 0x72, 0xdd, 0x2f, 0x36, 0x1a, 0x01, + 0x09, 0x43, 0x34, 0x07, 0x05, 0xb7, 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x4a, 0x15, 0x17, 0xdc, + 0xb6, 0x64, 0xcb, 0xd8, 0x41, 0x58, 0x34, 0x15, 0x69, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x12, + 0x8c, 0x78, 0x7e, 0x83, 0xdb, 0x76, 0xf1, 0x2b, 0x8d, 0x2d, 0xb0, 0x75, 0x51, 0x86, 0x15, 0x14, + 0x55, 0xa1, 0xc4, 0xcd, 0x9e, 0xe2, 0x45, 0xdb, 0x97, 0xf1, 0x14, 0xfb, 0xb2, 0x0d, 0x59, 0x13, + 0xc7, 0x44, 0xec, 0x5f, 0xb1, 0x60, 0x4c, 0x7e, 0x59, 0x9f, 0x3c, 0x27, 0xdd, 0x5a, 0x31, 0xbf, + 0x19, 0x6f, 0x2d, 0xca, 0x33, 0x32, 0x88, 0xc1, 0x2a, 0x16, 0x8f, 0xc4, 0x2a, 0x5e, 0x81, 0x51, + 0xa7, 0xdd, 0xae, 0x9a, 0x7c, 0x26, 0x5b, 0x4a, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xec, 0x1f, 0x2d, + 0xc0, 0x84, 0xfc, 0x82, 0x5a, 0x67, 0x33, 0x24, 0x11, 0xda, 0x80, 0x92, 0xc3, 0x67, 0x89, 0xc8, + 0x45, 0xfe, 0x44, 0xb6, 0x1c, 0xc1, 0x98, 0xd2, 0xf8, 0xc2, 0x5f, 0x94, 0xb5, 0x71, 0x4c, 0x08, + 0x35, 0x61, 0xda, 0xf3, 0x23, 0x76, 0xf8, 0x2b, 0x78, 0x37, 0xd5, 0x4e, 0x92, 0xfa, 0x59, 0x41, + 0x7d, 0x7a, 0x3d, 0x49, 0x05, 0xa7, 0x09, 0xa3, 0x15, 0x29, 0x9b, 0x29, 0xe6, 0x0b, 0x03, 0xf4, + 0x89, 0xcb, 0x16, 0xcd, 0xd8, 0xbf, 0x6c, 0x41, 0x49, 0xa2, 0x9d, 0x84, 0x16, 0x6f, 0x0d, 0x86, + 0x43, 0x36, 0x09, 0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x15, 0xdf, 0x69, 0xfc, 0x7f, 0x88, + 0x25, 0x0d, 0x26, 0x9a, 0x57, 0xdd, 0x7f, 0x97, 0x88, 0xe6, 0x55, 0x7f, 0x72, 0x2e, 0xa5, 0x3f, + 0x64, 0x7d, 0xd6, 0x64, 0x5d, 0x94, 0xf5, 0x6a, 0x07, 0x64, 0xcb, 0xbd, 0x97, 0x64, 0xbd, 0xaa, + 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x03, 0xc6, 0xea, 0x52, 0x26, 0x1b, 0xef, 0xf0, 0x8b, 0x5d, 0xf5, + 0x03, 0x4a, 0x95, 0xc4, 0x65, 0x21, 0xcb, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xcd, 0x08, 0x8a, 0xbd, + 0xcc, 0x08, 0x62, 0xba, 0xf9, 0x4a, 0xf5, 0x1f, 0xb7, 0x60, 0x88, 0xcb, 0xe2, 0xfa, 0x13, 0x85, + 0x6a, 0x9a, 0xb5, 0x78, 0xec, 0x6e, 0xd3, 0x42, 0xa1, 0x29, 0x43, 0x6b, 0x50, 0x62, 0x3f, 0x98, + 0x2c, 0xb1, 0x98, 0x6f, 0x75, 0xcf, 0x5b, 0xd5, 0x3b, 0x78, 0x5b, 0x56, 0xc3, 0x31, 0x05, 0xfb, + 0x87, 0x8b, 0xf4, 0x74, 0x8b, 0x51, 0x8d, 0x4b, 0xdf, 0x7a, 0x78, 0x97, 0x7e, 0xe1, 0x61, 0x5d, + 0xfa, 0xdb, 0x30, 0x59, 0xd7, 
0xf4, 0x70, 0xf1, 0x4c, 0x5e, 0xea, 0xba, 0x48, 0x34, 0x95, 0x1d, + 0x97, 0xb2, 0x2c, 0x9b, 0x44, 0x70, 0x92, 0x2a, 0xfa, 0x36, 0x18, 0xe3, 0xf3, 0x2c, 0x5a, 0xe1, + 0x96, 0x18, 0x1f, 0xc8, 0x5f, 0x2f, 0x7a, 0x13, 0x5c, 0x2a, 0xa7, 0x55, 0xc7, 0x06, 0x31, 0xfb, + 0x4f, 0x2c, 0x40, 0x2b, 0xed, 0x1d, 0xd2, 0x22, 0x81, 0xd3, 0x8c, 0xc5, 0xe9, 0xdf, 0x6f, 0xc1, + 0x2c, 0x49, 0x15, 0x2f, 0xfb, 0xad, 0x96, 0x78, 0xb4, 0xe4, 0xbc, 0xab, 0x57, 0x72, 0xea, 0x28, + 0xb7, 0x84, 0xd9, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x83, 0x19, 0x7e, 0x4b, 0x2a, 0x80, 0x66, + 0x7b, 0xfd, 0xa8, 0x20, 0x3c, 0xb3, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xec, 0xef, 0x1a, 0x83, 0xdc, + 0x5e, 0xbc, 0xa7, 0x47, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e, 0xe1, 0x3d, 0x3d, 0xc2, 0x7b, 0x7a, + 0x84, 0x6f, 0x7a, 0x3d, 0xc2, 0x1f, 0x59, 0x30, 0x93, 0xbe, 0x06, 0x4e, 0x82, 0x31, 0xef, 0xc0, + 0x4c, 0xfa, 0xae, 0xeb, 0x6a, 0x67, 0x97, 0xee, 0x67, 0x7c, 0xef, 0x65, 0x7c, 0x03, 0xce, 0xa2, + 0x6f, 0xff, 0x9a, 0x05, 0xa7, 0x15, 0xb2, 0xf1, 0xd2, 0xff, 0x1c, 0xcc, 0xf0, 0xf3, 0x65, 0xb9, + 0xe9, 0xb8, 0xad, 0x0d, 0xd2, 0x6a, 0x37, 0x9d, 0x48, 0x9a, 0x19, 0x5c, 0xc9, 0xdc, 0xaa, 0x09, + 0x13, 0x5d, 0xa3, 0xe2, 0xd2, 0x23, 0xb4, 0x5f, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0x61, 0x94, 0x5a, + 0xe8, 0x69, 0x26, 0xfc, 0x0b, 0x23, 0x30, 0xb8, 0xb2, 0x47, 0xbc, 0xe8, 0x04, 0x26, 0xaa, 0x0e, + 0x13, 0xae, 0xb7, 0xe7, 0x37, 0xf7, 0x48, 0x83, 0xc3, 0x8f, 0xf2, 0xd0, 0x3f, 0x23, 0x48, 0x4f, + 0x54, 0x0c, 0x12, 0x38, 0x41, 0xf2, 0x61, 0x08, 0xdb, 0xaf, 0xc2, 0x10, 0xbf, 0xe3, 0x84, 0xa4, + 0x3d, 0xf3, 0x4a, 0x63, 0x83, 0x28, 0x6e, 0xee, 0x58, 0x11, 0xc0, 0xef, 0x50, 0x51, 0x1d, 0xbd, + 0x09, 0x13, 0x5b, 0x6e, 0x10, 0x46, 0x1b, 0x6e, 0x8b, 0x84, 0x91, 0xd3, 0x6a, 0x3f, 0x80, 0x70, + 0x5d, 0x8d, 0xc3, 0xaa, 0x41, 0x09, 0x27, 0x28, 0xa3, 0x6d, 0x18, 0x6f, 0x3a, 0x7a, 0x53, 0xc3, + 0x47, 0x6e, 0x4a, 0x5d, 0x9e, 0x37, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0x4f, 0x9b, 0x3a, 0x93, 0x0f, + 0x8f, 0x30, 0xa9, 0x89, 0x3a, 0x6d, 0xb8, 0x60, 0x98, 0xc3, 0x28, 0x1f, 0xc8, 0xec, 0x87, 0x4b, + 0x26, 0x1f, 0xa8, 0x59, 0x09, 0x7f, 0x16, 0x4a, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0xee, 0xdf, 0xcb, + 0xfd, 0xf5, 0x75, 0xcd, 0xad, 0x07, 0xbe, 0xa9, 0xd6, 0x58, 0x91, 0x94, 0x70, 0x4c, 0x14, 0x2d, + 0xc3, 0x50, 0x48, 0x02, 0x97, 0x84, 0xe2, 0x26, 0xee, 0x32, 0x8d, 0x0c, 0x8d, 0xbb, 0xde, 0xf0, + 0xdf, 0x58, 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0x12, 0x5f, 0x76, 0x57, 0x6a, 0xcb, 0x6b, 0x91, 0x95, + 0x62, 0x01, 0x45, 0xaf, 0xc1, 0x70, 0x40, 0x9a, 0x4c, 0x6f, 0x36, 0xde, 0xff, 0x22, 0xe7, 0x6a, + 0x38, 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x3a, 0xa0, 0x80, 0x50, 0x3e, 0xd2, 0xf5, 0xb6, 0x95, 0x55, + 0xad, 0xb8, 0x87, 0xd4, 0xb9, 0x85, 0x63, 0x0c, 0xe9, 0x05, 0x85, 0x33, 0xaa, 0xa1, 0xab, 0x30, + 0xad, 0x4a, 0x2b, 0x5e, 0x18, 0x39, 0xf4, 0xfc, 0x9f, 0x64, 0xb4, 0x94, 0x18, 0x07, 0x27, 0x11, + 0x70, 0xba, 0x8e, 0xfd, 0x25, 0x0b, 0xf8, 0x38, 0x9f, 0x80, 0xf0, 0xe2, 0x55, 0x53, 0x78, 0x71, + 0x36, 0x77, 0xe6, 0x72, 0x04, 0x17, 0x5f, 0xb2, 0x60, 0x54, 0x9b, 0xd9, 0x78, 0xcd, 0x5a, 0x5d, + 0xd6, 0x6c, 0x07, 0xa6, 0xe8, 0x4a, 0xbf, 0xb9, 0x19, 0x92, 0x60, 0x8f, 0x34, 0xd8, 0xc2, 0x2c, + 0x3c, 0xd8, 0xc2, 0x54, 0x16, 0x7c, 0x37, 0x12, 0x04, 0x71, 0xaa, 0x09, 0xfb, 0xb3, 0xb2, 0xab, + 0xca, 0xe0, 0xb1, 0xae, 0xe6, 0x3c, 0x61, 0xf0, 0xa8, 0x66, 0x15, 0xc7, 0x38, 0x74, 0xab, 0xed, + 0xf8, 0x61, 0x94, 0x34, 0x78, 0xbc, 0xe6, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x79, 0x80, 0x95, 0x7b, + 0xa4, 0xce, 0x57, 0xac, 0xfe, 0xb6, 0xb2, 0xf2, 0xdf, 0x56, 0xf6, 0x6f, 0x59, 0x30, 0xb1, 0xba, + 0x6c, 0xdc, 0x73, 0x0b, 0x00, 0xfc, 0x41, 0x78, 0xe7, 
0xce, 0xba, 0xb4, 0x16, 0xe0, 0x0a, 0x5f, + 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x16, 0x8a, 0xcd, 0x8e, 0x27, 0xa4, 0xab, 0xc3, 0x94, 0x7b, 0xb8, + 0xd1, 0xf1, 0x30, 0x2d, 0xd3, 0x3c, 0x2e, 0x8a, 0x7d, 0x7b, 0x5c, 0xf4, 0x8c, 0x7c, 0x80, 0xe6, + 0x61, 0xf0, 0xee, 0x5d, 0xb7, 0xc1, 0xfd, 0x4b, 0x85, 0x25, 0xc3, 0x9d, 0x3b, 0x95, 0x72, 0x88, + 0x79, 0xb9, 0xfd, 0xc5, 0x22, 0xcc, 0xad, 0x36, 0xc9, 0xbd, 0x77, 0xe8, 0x63, 0xdb, 0xaf, 0xbf, + 0xc8, 0xd1, 0xe4, 0x54, 0x47, 0xf5, 0x09, 0xea, 0x3d, 0x1e, 0x5b, 0x30, 0xcc, 0xed, 0xfd, 0xa4, + 0xc7, 0xed, 0x2b, 0x59, 0xad, 0xe7, 0x0f, 0xc8, 0x02, 0xb7, 0x1b, 0x14, 0x0e, 0x83, 0xea, 0xc2, + 0x14, 0xa5, 0x58, 0x12, 0x9f, 0x7b, 0x19, 0xc6, 0x74, 0xcc, 0x23, 0x79, 0xe7, 0xfd, 0x95, 0x22, + 0x4c, 0xd1, 0x1e, 0x3c, 0xd4, 0x89, 0xb8, 0x95, 0x9e, 0x88, 0xe3, 0xf6, 0xd0, 0xea, 0x3d, 0x1b, + 0x6f, 0x24, 0x67, 0xe3, 0x4a, 0xde, 0x6c, 0x9c, 0xf4, 0x1c, 0x7c, 0xa7, 0x05, 0x33, 0xab, 0x4d, + 0xbf, 0xbe, 0x9b, 0xf0, 0xa2, 0x7a, 0x11, 0x46, 0xe9, 0x71, 0x1c, 0x1a, 0x0e, 0xfe, 0x46, 0xc8, + 0x07, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xd6, 0xad, 0x4a, 0x39, 0x2b, 0x52, 0x84, 0x00, 0x61, + 0x1d, 0xcf, 0xfe, 0x0d, 0x0b, 0xce, 0x5d, 0x5d, 0x5e, 0x89, 0x97, 0x62, 0x2a, 0x58, 0xc5, 0x45, + 0x18, 0x6a, 0x37, 0xb4, 0xae, 0xc4, 0xd2, 0xe7, 0x32, 0xeb, 0x85, 0x80, 0xbe, 0x5b, 0x02, 0xb1, + 0xfc, 0xb4, 0x05, 0x33, 0x57, 0xdd, 0x88, 0xde, 0xae, 0xc9, 0xb0, 0x09, 0xf4, 0x7a, 0x0d, 0xdd, + 0xc8, 0x0f, 0xf6, 0x93, 0x61, 0x13, 0xb0, 0x82, 0x60, 0x0d, 0x8b, 0xb7, 0xbc, 0xe7, 0x32, 0x4b, + 0xf3, 0x82, 0xa9, 0x87, 0xc3, 0xa2, 0x1c, 0x2b, 0x0c, 0xfa, 0x61, 0x0d, 0x37, 0x60, 0x22, 0xcc, + 0x7d, 0x71, 0xc2, 0xaa, 0x0f, 0x2b, 0x4b, 0x00, 0x8e, 0x71, 0xe8, 0x6b, 0x6e, 0xfe, 0x6a, 0xb3, + 0x13, 0x46, 0x24, 0xd8, 0x0a, 0x73, 0x4e, 0xc7, 0xe7, 0xa1, 0x44, 0xa4, 0xc2, 0x40, 0xf4, 0x5a, + 0x71, 0x8c, 0x4a, 0x93, 0xc0, 0xa3, 0x37, 0x28, 0xbc, 0x3e, 0x7c, 0x32, 0x8f, 0xe6, 0x54, 0xb7, + 0x0a, 0x88, 0xe8, 0x6d, 0xe9, 0xe1, 0x2c, 0x98, 0x5f, 0xfc, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, + 0x3f, 0x66, 0xc1, 0x69, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0x7f, 0xb6, 0x00, 0xe3, 0xd7, 0x36, + 0x36, 0xaa, 0x57, 0x49, 0x24, 0xae, 0xed, 0xde, 0x66, 0x00, 0x58, 0xd3, 0x66, 0x76, 0x7b, 0xcc, + 0x75, 0x22, 0xb7, 0xb9, 0xc0, 0xa3, 0x22, 0x2d, 0x54, 0xbc, 0xe8, 0x66, 0x50, 0x8b, 0x02, 0xd7, + 0xdb, 0xce, 0xd4, 0x7f, 0x4a, 0xe6, 0xa2, 0x98, 0xc7, 0x5c, 0xa0, 0xe7, 0x61, 0x88, 0x85, 0x65, + 0x92, 0x93, 0xf0, 0xa8, 0x7a, 0x0b, 0xb1, 0xd2, 0xc3, 0x83, 0xf9, 0xd2, 0x2d, 0x5c, 0xe1, 0x7f, + 0xb0, 0x40, 0x45, 0xb7, 0x60, 0x74, 0x27, 0x8a, 0xda, 0xd7, 0x88, 0xd3, 0xa0, 0x4f, 0x77, 0x7e, + 0x1c, 0x9e, 0xcf, 0x3a, 0x0e, 0xe9, 0x20, 0x70, 0xb4, 0xf8, 0x04, 0x89, 0xcb, 0x42, 0xac, 0xd3, + 0xb1, 0x6b, 0x00, 0x31, 0xec, 0x98, 0x14, 0x39, 0xf6, 0x1f, 0x58, 0x30, 0xcc, 0x23, 0x64, 0x04, + 0xe8, 0xa3, 0x30, 0x40, 0xee, 0x91, 0xba, 0xe0, 0x78, 0x33, 0x3b, 0x1c, 0x73, 0x5a, 0x5c, 0x20, + 0x4d, 0xff, 0x63, 0x56, 0x0b, 0x5d, 0x83, 0x61, 0xda, 0xdb, 0xab, 0x2a, 0x5c, 0xc8, 0xe3, 0x79, + 0x5f, 0xac, 0xa6, 0x9d, 0x33, 0x67, 0xa2, 0x08, 0xcb, 0xea, 0x4c, 0x7b, 0x5e, 0x6f, 0xd7, 0xe8, + 0x89, 0x1d, 0x75, 0x63, 0x2c, 0x36, 0x96, 0xab, 0x1c, 0x49, 0x50, 0xe3, 0xda, 0x73, 0x59, 0x88, + 0x63, 0x22, 0xf6, 0x06, 0x94, 0xe8, 0xa4, 0x2e, 0x36, 0x5d, 0xa7, 0xbb, 0x41, 0xc0, 0xd3, 0x50, + 0x92, 0xea, 0xfe, 0x50, 0x78, 0xc6, 0x33, 0xaa, 0xd2, 0x1a, 0x20, 0xc4, 0x31, 0xdc, 0xde, 0x82, + 0x53, 0xcc, 0x78, 0xd3, 0x89, 0x76, 0x8c, 0x3d, 0xd6, 0x7b, 0x31, 0x3f, 0x23, 0x1e, 0x90, 0x7c, + 0x66, 0x66, 0x35, 0xe7, 0xd3, 0x31, 0x49, 0x31, 0x7e, 0x4c, 0xda, 0x5f, 0x1b, 
0x80, 0x47, 0x2b, + 0xb5, 0xfc, 0xe0, 0x29, 0x2f, 0xc1, 0x18, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0x25, + 0x89, 0xde, 0xd0, 0x60, 0xd8, 0xc0, 0x44, 0xe7, 0xa0, 0xe8, 0xbe, 0xe5, 0x25, 0x5d, 0xb3, 0x2a, + 0xaf, 0xaf, 0x63, 0x5a, 0x4e, 0xc1, 0x94, 0xc5, 0xe5, 0x77, 0x87, 0x02, 0x2b, 0x36, 0xf7, 0x55, + 0x98, 0x70, 0xc3, 0x7a, 0xe8, 0x56, 0x3c, 0x7a, 0xce, 0x68, 0x27, 0x95, 0x12, 0x6e, 0xd0, 0x4e, + 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0x17, 0xd9, 0x60, 0xdf, 0x6c, 0x72, 0x4f, 0x57, 0x71, 0xfa, 0x02, + 0x68, 0xb3, 0xaf, 0x0b, 0x99, 0x4a, 0x41, 0xbc, 0x00, 0xf8, 0x07, 0x87, 0x58, 0xc2, 0xe8, 0xcb, + 0xb1, 0xbe, 0xe3, 0xb4, 0x17, 0x3b, 0xd1, 0x4e, 0xd9, 0x0d, 0xeb, 0xfe, 0x1e, 0x09, 0xf6, 0xd9, + 0xa3, 0x7f, 0x24, 0x7e, 0x39, 0x2a, 0xc0, 0xf2, 0xb5, 0xc5, 0x2a, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, + 0x11, 0x26, 0x65, 0x61, 0x8d, 0x84, 0xec, 0x0a, 0x1b, 0x65, 0x64, 0x94, 0xb3, 0x94, 0x28, 0x56, + 0x44, 0x92, 0xf8, 0x26, 0x27, 0x0d, 0xc7, 0xc1, 0x49, 0x7f, 0x04, 0xc6, 0x5d, 0xcf, 0x8d, 0x5c, + 0x27, 0xf2, 0xb9, 0x3e, 0x8c, 0xbf, 0xef, 0x99, 0xa0, 0xbf, 0xa2, 0x03, 0xb0, 0x89, 0x67, 0xff, + 0xb7, 0x01, 0x98, 0x66, 0xd3, 0xf6, 0xde, 0x0a, 0xfb, 0x66, 0x5a, 0x61, 0xb7, 0xd2, 0x2b, 0xec, + 0x38, 0x9e, 0x08, 0x0f, 0xbc, 0xcc, 0xde, 0x84, 0x92, 0xf2, 0x0f, 0x93, 0x0e, 0xa2, 0x56, 0x8e, + 0x83, 0x68, 0x6f, 0xee, 0x43, 0x9a, 0xd8, 0x15, 0x33, 0x4d, 0xec, 0xfe, 0x96, 0x05, 0xb1, 0x82, + 0x07, 0x5d, 0x83, 0x52, 0xdb, 0x67, 0x96, 0xa3, 0x81, 0x34, 0xc7, 0x7e, 0x34, 0xf3, 0xa2, 0xe2, + 0x97, 0x22, 0xff, 0xf8, 0xaa, 0xac, 0x81, 0xe3, 0xca, 0x68, 0x09, 0x86, 0xdb, 0x01, 0xa9, 0x45, + 0x2c, 0x86, 0x4a, 0x4f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0x3f, 0x67, 0x01, 0x70, + 0x2b, 0x36, 0xc7, 0xdb, 0x26, 0x27, 0x20, 0xb5, 0x2e, 0xc3, 0x40, 0xd8, 0x26, 0xf5, 0x6e, 0x36, + 0xbd, 0x71, 0x7f, 0x6a, 0x6d, 0x52, 0x8f, 0x07, 0x9c, 0xfe, 0xc3, 0xac, 0xb6, 0xfd, 0xdd, 0x00, + 0x13, 0x31, 0x5a, 0x25, 0x22, 0x2d, 0xf4, 0xac, 0x11, 0x53, 0xe1, 0x6c, 0x22, 0xa6, 0x42, 0x89, + 0x61, 0x6b, 0x02, 0xd2, 0x37, 0xa1, 0xd8, 0x72, 0xee, 0x09, 0x09, 0xd8, 0xd3, 0xdd, 0xbb, 0x41, + 0xe9, 0x2f, 0xac, 0x39, 0xf7, 0xf8, 0x23, 0xf1, 0x69, 0xb9, 0x40, 0xd6, 0x9c, 0x7b, 0x87, 0xdc, + 0x72, 0x97, 0x1d, 0x52, 0x37, 0xdc, 0x30, 0xfa, 0xfc, 0x7f, 0x8d, 0xff, 0xb3, 0x65, 0x47, 0x1b, + 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0xd0, 0xea, 0xab, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x7d, 0xb4, + 0xe5, 0x7a, 0xe8, 0x6d, 0x18, 0x16, 0xf6, 0x93, 0x22, 0x86, 0xd1, 0xe5, 0x3e, 0xda, 0x13, 0xe6, + 0x97, 0xbc, 0xcd, 0xcb, 0xf2, 0x11, 0x2c, 0x4a, 0x7b, 0xb6, 0x2b, 0x1b, 0x44, 0x7f, 0xdd, 0x82, + 0x09, 0xf1, 0x1b, 0x93, 0xb7, 0x3a, 0x24, 0x8c, 0x04, 0xef, 0xf9, 0xe1, 0xfe, 0xfb, 0x20, 0x2a, + 0xf2, 0xae, 0x7c, 0x58, 0x1e, 0xb3, 0x26, 0xb0, 0x67, 0x8f, 0x12, 0xbd, 0x40, 0xff, 0xd0, 0x82, + 0x53, 0x2d, 0xe7, 0x1e, 0x6f, 0x91, 0x97, 0x61, 0x27, 0x72, 0x7d, 0x61, 0x87, 0xf0, 0xd1, 0xfe, + 0xa6, 0x3f, 0x55, 0x9d, 0x77, 0x52, 0x2a, 0x4b, 0x4f, 0x65, 0xa1, 0xf4, 0xec, 0x6a, 0x66, 0xbf, + 0xe6, 0xb6, 0x60, 0x44, 0xae, 0xb7, 0x0c, 0x51, 0x43, 0x59, 0x67, 0xac, 0x8f, 0x6c, 0xbe, 0xaa, + 0xc7, 0x2a, 0xa0, 0xed, 0x88, 0xb5, 0xf6, 0x50, 0xdb, 0x79, 0x13, 0xc6, 0xf4, 0x35, 0xf6, 0x50, + 0xdb, 0x7a, 0x0b, 0x66, 0x32, 0xd6, 0xd2, 0x43, 0x6d, 0xf2, 0x2e, 0x9c, 0xcd, 0x5d, 0x1f, 0x0f, + 0xb3, 0x61, 0xfb, 0x67, 0x2d, 0xfd, 0x1c, 0x3c, 0x01, 0xd5, 0xc1, 0xb2, 0xa9, 0x3a, 0x38, 0xdf, + 0x7d, 0xe7, 0xe4, 0xe8, 0x0f, 0xde, 0xd0, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x1a, 0x0c, 0x35, 0x69, + 0x89, 0xb4, 0xc2, 0xb5, 0x7b, 0xef, 0xc8, 0x98, 0x97, 0x62, 0xe5, 0x21, 0x16, 0x14, 0xec, 0x5f, + 0xb4, 
0x60, 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x6c, 0x2e, 0x69, 0x11, 0x5e, 0x79, 0x01, + 0x3b, 0x77, 0x57, 0xee, 0x45, 0xc4, 0x0b, 0xd9, 0x53, 0x31, 0x73, 0x60, 0x7e, 0xd2, 0x82, 0x99, + 0x1b, 0xbe, 0xd3, 0x58, 0x72, 0x9a, 0x8e, 0x57, 0x27, 0x41, 0xc5, 0xdb, 0x3e, 0x92, 0x09, 0x79, + 0xa1, 0xa7, 0x09, 0xf9, 0xb2, 0xb4, 0xc0, 0x1a, 0xc8, 0x9f, 0x3f, 0xca, 0x48, 0x26, 0xa3, 0xcc, + 0x18, 0xb6, 0xc2, 0x3b, 0x80, 0xf4, 0x5e, 0x0a, 0x87, 0x1e, 0x0c, 0xc3, 0x2e, 0xef, 0xaf, 0x98, + 0xc4, 0x27, 0xb3, 0x19, 0xbc, 0xd4, 0xe7, 0x69, 0xae, 0x2a, 0xbc, 0x00, 0x4b, 0x42, 0xf6, 0x4b, + 0x90, 0x19, 0x15, 0xa0, 0xb7, 0xf0, 0xc1, 0xfe, 0x24, 0x4c, 0xb3, 0x9a, 0x47, 0x7c, 0x18, 0xdb, + 0x09, 0xd9, 0x66, 0x46, 0xbc, 0x40, 0xfb, 0x0b, 0x16, 0x4c, 0xae, 0x27, 0xc2, 0xa8, 0x5d, 0x64, + 0xda, 0xd0, 0x0c, 0x91, 0x7a, 0x8d, 0x95, 0x62, 0x01, 0x3d, 0x76, 0x49, 0xd6, 0x5f, 0x58, 0x10, + 0x07, 0xea, 0x38, 0x01, 0xf6, 0x6d, 0xd9, 0x60, 0xdf, 0x32, 0x25, 0x2c, 0xaa, 0x3b, 0x79, 0xdc, + 0x1b, 0xba, 0xae, 0x42, 0x58, 0x75, 0x11, 0xae, 0xc4, 0x64, 0xf8, 0x52, 0x9c, 0x30, 0xe3, 0x5c, + 0xc9, 0xa0, 0x56, 0xf6, 0x6f, 0x17, 0x00, 0x29, 0xdc, 0xbe, 0x43, 0x6c, 0xa5, 0x6b, 0x1c, 0x4f, + 0x88, 0xad, 0x3d, 0x40, 0x4c, 0x9f, 0x1f, 0x38, 0x5e, 0xc8, 0xc9, 0xba, 0x42, 0x76, 0x77, 0x34, + 0x63, 0x81, 0x39, 0xd1, 0x24, 0xba, 0x91, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xec, 0x34, 0x06, 0xfb, + 0xb5, 0xd3, 0x18, 0xea, 0xe1, 0xb4, 0xf7, 0x33, 0x16, 0x8c, 0xab, 0x61, 0x7a, 0x97, 0x98, 0xd4, + 0xab, 0xfe, 0xe4, 0x1c, 0xa0, 0x55, 0xad, 0xcb, 0xec, 0x62, 0xf9, 0x56, 0xe6, 0x7c, 0xe9, 0x34, + 0xdd, 0xb7, 0x89, 0x0a, 0x70, 0x38, 0x2f, 0x9c, 0x29, 0x45, 0xe9, 0xe1, 0xc1, 0xfc, 0xb8, 0xfa, + 0xc7, 0x03, 0x2a, 0xc7, 0x55, 0xe8, 0x91, 0x3c, 0x99, 0x58, 0x8a, 0xe8, 0x45, 0x18, 0x6c, 0xef, + 0x38, 0x21, 0x49, 0xb8, 0x1e, 0x0d, 0x56, 0x69, 0xe1, 0xe1, 0xc1, 0xfc, 0x84, 0xaa, 0xc0, 0x4a, + 0x30, 0xc7, 0xee, 0x3f, 0x70, 0x59, 0x7a, 0x71, 0xf6, 0x0c, 0x5c, 0xf6, 0x27, 0x16, 0x0c, 0xac, + 0xfb, 0x8d, 0x93, 0x38, 0x02, 0x5e, 0x35, 0x8e, 0x80, 0xc7, 0xf2, 0x62, 0xdd, 0xe7, 0xee, 0xfe, + 0xd5, 0xc4, 0xee, 0x3f, 0x9f, 0x4b, 0xa1, 0xfb, 0xc6, 0x6f, 0xc1, 0x28, 0x8b, 0xa0, 0x2f, 0xdc, + 0xac, 0x9e, 0x37, 0x36, 0xfc, 0x7c, 0x62, 0xc3, 0x4f, 0x6a, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0x86, + 0x85, 0xdf, 0x4e, 0xd2, 0x87, 0x55, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xbc, 0x08, 0x46, 0xc4, 0x7e, + 0xf4, 0xcb, 0x16, 0x2c, 0x04, 0xdc, 0x9e, 0xb7, 0x51, 0xee, 0x04, 0xae, 0xb7, 0x5d, 0xab, 0xef, + 0x90, 0x46, 0xa7, 0xe9, 0x7a, 0xdb, 0x95, 0x6d, 0xcf, 0x57, 0xc5, 0x2b, 0xf7, 0x48, 0xbd, 0xc3, + 0x94, 0x60, 0x3d, 0xd2, 0x03, 0x28, 0xbb, 0xf8, 0xe7, 0xee, 0x1f, 0xcc, 0x2f, 0xe0, 0x23, 0xd1, + 0xc6, 0x47, 0xec, 0x0b, 0xfa, 0x0d, 0x0b, 0x2e, 0xf3, 0x40, 0xf6, 0xfd, 0xf7, 0xbf, 0xcb, 0x6b, + 0xb9, 0x2a, 0x49, 0xc5, 0x44, 0x36, 0x48, 0xd0, 0x5a, 0xfa, 0x88, 0x18, 0xd0, 0xcb, 0xd5, 0xa3, + 0xb5, 0x85, 0x8f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x5c, 0x04, 0xb8, 0x12, 0x77, 0xc0, 0x8b, + 0xc6, 0x92, 0x78, 0x3c, 0xb1, 0x24, 0xa6, 0x0d, 0xe4, 0xe3, 0x39, 0xfe, 0x43, 0x98, 0xa6, 0x87, + 0xf3, 0x35, 0xe2, 0x04, 0xd1, 0x26, 0x71, 0xb8, 0xf9, 0x55, 0xf1, 0xc8, 0xa7, 0xbf, 0x12, 0xcf, + 0xdd, 0x48, 0x12, 0xc3, 0x69, 0xfa, 0xdf, 0x4c, 0x77, 0x8e, 0x07, 0x53, 0xa9, 0x18, 0x65, 0x9f, + 0x82, 0x92, 0x72, 0x3a, 0x11, 0x87, 0x4e, 0xf7, 0x50, 0x7f, 0x49, 0x0a, 0x5c, 0x84, 0x16, 0x3b, + 0x3c, 0xc5, 0xe4, 0xec, 0x7f, 0x54, 0x30, 0x1a, 0xe4, 0x93, 0xb8, 0x0e, 0x23, 0x4e, 0x18, 0xba, + 0xdb, 0x1e, 0x69, 0x88, 0x1d, 0xfb, 0xfe, 0xbc, 0x1d, 0x6b, 0x34, 0xc3, 0x1c, 0x7f, 0x16, 0x45, + 0x4d, 0xac, 0x68, 0xa0, 0x6b, 
0xdc, 0xc8, 0x6d, 0x4f, 0xbe, 0xf7, 0xfa, 0xa3, 0x06, 0xd2, 0x0c, + 0x6e, 0x8f, 0x60, 0x51, 0x1f, 0x7d, 0x9a, 0x5b, 0x21, 0x5e, 0xf7, 0xfc, 0xbb, 0xde, 0x55, 0xdf, + 0x97, 0x41, 0x24, 0xfa, 0x23, 0x38, 0x2d, 0x6d, 0x0f, 0x55, 0x75, 0x6c, 0x52, 0xeb, 0x2f, 0xe8, + 0xe7, 0xe7, 0x60, 0x86, 0x92, 0x36, 0x7d, 0xbc, 0x43, 0x44, 0x60, 0x52, 0x44, 0x4f, 0x93, 0x65, + 0x62, 0xec, 0x32, 0x9f, 0x72, 0x66, 0xed, 0x58, 0x8e, 0x7c, 0xdd, 0x24, 0x81, 0x93, 0x34, 0xed, + 0x9f, 0xb2, 0x80, 0xf9, 0xbb, 0x9e, 0x00, 0x3f, 0xf2, 0x31, 0x93, 0x1f, 0x99, 0xcd, 0x1b, 0xe4, + 0x1c, 0x56, 0xe4, 0x05, 0xbe, 0xb2, 0xaa, 0x81, 0x7f, 0x6f, 0x5f, 0x98, 0x8e, 0xf4, 0x7e, 0x7f, + 0xd8, 0xff, 0xd7, 0xe2, 0x87, 0x98, 0x72, 0x09, 0x41, 0xdf, 0x0e, 0x23, 0x75, 0xa7, 0xed, 0xd4, + 0x79, 0x7a, 0x99, 0x5c, 0x89, 0x9e, 0x51, 0x69, 0x61, 0x59, 0xd4, 0xe0, 0x12, 0x2a, 0x19, 0x85, + 0x6f, 0x44, 0x16, 0xf7, 0x94, 0x4a, 0xa9, 0x26, 0xe7, 0x76, 0x61, 0xdc, 0x20, 0xf6, 0x50, 0xc5, + 0x19, 0xdf, 0xce, 0xaf, 0x58, 0x15, 0x35, 0xb2, 0x05, 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0xf2, + 0x71, 0xf9, 0xfe, 0x5e, 0x97, 0x28, 0xbb, 0x7d, 0x34, 0x57, 0xda, 0x04, 0x19, 0x9c, 0xa6, 0x6c, + 0xff, 0x84, 0x05, 0x8f, 0xe8, 0x88, 0x9a, 0xb7, 0x4e, 0x2f, 0x1d, 0x41, 0x19, 0x46, 0xfc, 0x36, + 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x49, 0x0e, 0xfa, 0x4d, 0x51, 0x7e, 0x28, 0x82, 0xb3, + 0x4b, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xf4, 0xf5, 0xc9, 0x06, 0x23, 0x14, 0x7e, 0x59, 0xec, 0x0c, + 0x60, 0xea, 0xf2, 0x10, 0x0b, 0x88, 0xfd, 0x35, 0x8b, 0x2f, 0x2c, 0xbd, 0xeb, 0xe8, 0x2d, 0x98, + 0x6a, 0x39, 0x51, 0x7d, 0x67, 0xe5, 0x5e, 0x3b, 0xe0, 0x1a, 0x17, 0x39, 0x4e, 0x4f, 0xf7, 0x1a, + 0x27, 0xed, 0x23, 0x63, 0xc3, 0xca, 0xb5, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x36, 0x61, 0x94, 0x95, + 0x31, 0x97, 0xc3, 0xb0, 0x1b, 0x6b, 0x90, 0xd7, 0x9a, 0xb2, 0x38, 0x58, 0x8b, 0xe9, 0x60, 0x9d, + 0xa8, 0xfd, 0xe5, 0x22, 0xdf, 0xed, 0x8c, 0x95, 0x7f, 0x0a, 0x86, 0xdb, 0x7e, 0x63, 0xb9, 0x52, + 0xc6, 0x62, 0x16, 0xd4, 0x35, 0x52, 0xe5, 0xc5, 0x58, 0xc2, 0xd1, 0x25, 0x18, 0x11, 0x3f, 0xa5, + 0x86, 0x8c, 0x9d, 0xcd, 0x02, 0x2f, 0xc4, 0x0a, 0x8a, 0x9e, 0x03, 0x68, 0x07, 0xfe, 0x9e, 0xdb, + 0x60, 0xa1, 0x30, 0x8a, 0xa6, 0xb1, 0x50, 0x55, 0x41, 0xb0, 0x86, 0x85, 0x5e, 0x81, 0xf1, 0x8e, + 0x17, 0x72, 0x76, 0x44, 0x0b, 0x7c, 0xab, 0xcc, 0x58, 0x6e, 0xe9, 0x40, 0x6c, 0xe2, 0xa2, 0x45, + 0x18, 0x8a, 0x1c, 0x66, 0xfc, 0x32, 0x98, 0x6f, 0x7c, 0xbb, 0x41, 0x31, 0xf4, 0x4c, 0x26, 0xb4, + 0x02, 0x16, 0x15, 0xd1, 0xa7, 0xa4, 0xf7, 0x2f, 0x3f, 0xd8, 0x85, 0xd5, 0x7b, 0x7f, 0x97, 0x80, + 0xe6, 0xfb, 0x2b, 0xac, 0xe9, 0x0d, 0x5a, 0xe8, 0x65, 0x00, 0x72, 0x2f, 0x22, 0x81, 0xe7, 0x34, + 0x95, 0x6d, 0x99, 0xe2, 0x0b, 0xca, 0xfe, 0xba, 0x1f, 0xdd, 0x0a, 0xc9, 0x8a, 0xc2, 0xc0, 0x1a, + 0xb6, 0xfd, 0x1b, 0x25, 0x80, 0x98, 0x6f, 0x47, 0x6f, 0xa7, 0x0e, 0xae, 0x67, 0xba, 0x73, 0xfa, + 0xc7, 0x77, 0x6a, 0xa1, 0xef, 0xb1, 0x60, 0xd4, 0x69, 0x36, 0xfd, 0xba, 0xc3, 0x43, 0x13, 0x17, + 0xba, 0x1f, 0x9c, 0xa2, 0xfd, 0xc5, 0xb8, 0x06, 0xef, 0xc2, 0xf3, 0x72, 0x85, 0x6a, 0x90, 0x9e, + 0xbd, 0xd0, 0x1b, 0x46, 0x1f, 0x92, 0x4f, 0xc5, 0xa2, 0x31, 0x94, 0xea, 0xa9, 0x58, 0x62, 0x77, + 0x84, 0xfe, 0x4a, 0xbc, 0x65, 0xbc, 0x12, 0x07, 0xf2, 0xdd, 0x1b, 0x0d, 0xf6, 0xb5, 0xd7, 0x03, + 0x11, 0x55, 0xf5, 0x50, 0x07, 0x83, 0xf9, 0xbe, 0x84, 0xda, 0x3b, 0xa9, 0x47, 0x98, 0x83, 0x37, + 0x61, 0xb2, 0x61, 0x32, 0x01, 0x62, 0x25, 0x3e, 0x99, 0x47, 0x37, 0xc1, 0x33, 0xc4, 0xd7, 0x7e, + 0x02, 0x80, 0x93, 0x84, 0x51, 0x95, 0x47, 0xbe, 0xa8, 0x78, 0x5b, 0xbe, 0xf0, 0xbc, 0xb0, 0x73, + 0xe7, 0x72, 0x3f, 0x8c, 0x48, 0x8b, 0x62, 0xc6, 0xb7, 
0xfb, 0xba, 0xa8, 0x8b, 0x15, 0x15, 0xf4, + 0x1a, 0x0c, 0x31, 0x67, 0xb2, 0x70, 0x76, 0x24, 0x5f, 0xe2, 0x6c, 0x86, 0x72, 0x8b, 0x37, 0x24, + 0xfb, 0x1b, 0x62, 0x41, 0x01, 0x5d, 0x93, 0xae, 0x9a, 0x61, 0xc5, 0xbb, 0x15, 0x12, 0xe6, 0xaa, + 0x59, 0x5a, 0x7a, 0x7f, 0xec, 0x85, 0xc9, 0xcb, 0x33, 0xf3, 0x9d, 0x19, 0x35, 0x29, 0x17, 0x25, + 0xfe, 0xcb, 0x34, 0x6a, 0xb3, 0x90, 0xdf, 0x3d, 0x33, 0xd5, 0x5a, 0x3c, 0x9c, 0xb7, 0x4d, 0x12, + 0x38, 0x49, 0x93, 0x72, 0xa4, 0x7c, 0xd7, 0x0b, 0xdf, 0x8d, 0x5e, 0x67, 0x07, 0x7f, 0x88, 0xb3, + 0xdb, 0x88, 0x97, 0x60, 0x51, 0xff, 0x44, 0xd9, 0x83, 0x39, 0x0f, 0xa6, 0x92, 0x5b, 0xf4, 0xa1, + 0xb2, 0x23, 0x7f, 0x30, 0x00, 0x13, 0xe6, 0x92, 0x42, 0x97, 0xa1, 0x24, 0x88, 0xa8, 0xd4, 0x07, + 0x6a, 0x97, 0xac, 0x49, 0x00, 0x8e, 0x71, 0x58, 0xc6, 0x0b, 0x56, 0x5d, 0x33, 0xd6, 0x8d, 0x33, + 0x5e, 0x28, 0x08, 0xd6, 0xb0, 0xe8, 0xc3, 0x6a, 0xd3, 0xf7, 0x23, 0x75, 0x21, 0xa9, 0x75, 0xb7, + 0xc4, 0x4a, 0xb1, 0x80, 0xd2, 0x8b, 0x68, 0x97, 0x04, 0x1e, 0x69, 0x9a, 0x41, 0x92, 0xd5, 0x45, + 0x74, 0x5d, 0x07, 0x62, 0x13, 0x97, 0x5e, 0xa7, 0x7e, 0xc8, 0x16, 0xb2, 0x78, 0xbe, 0xc5, 0xc6, + 0xcf, 0x35, 0xee, 0x2d, 0x2e, 0xe1, 0xe8, 0x93, 0xf0, 0x88, 0x0a, 0x04, 0x85, 0xb9, 0x36, 0x43, + 0xb6, 0x38, 0x64, 0x48, 0x5b, 0x1e, 0x59, 0xce, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xab, 0x30, 0x21, + 0x58, 0x7c, 0x49, 0x71, 0xd8, 0x34, 0xb0, 0xb9, 0x6e, 0x40, 0x71, 0x02, 0x5b, 0x86, 0x79, 0x66, + 0x5c, 0xb6, 0xa4, 0x30, 0x92, 0x0e, 0xf3, 0xac, 0xc3, 0x71, 0xaa, 0x06, 0x5a, 0x84, 0x49, 0xce, + 0x83, 0xb9, 0xde, 0x36, 0x9f, 0x13, 0xe1, 0x5a, 0xa5, 0xb6, 0xd4, 0x4d, 0x13, 0x8c, 0x93, 0xf8, + 0xe8, 0x25, 0x18, 0x73, 0x82, 0xfa, 0x8e, 0x1b, 0x91, 0x7a, 0xd4, 0x09, 0xb8, 0xcf, 0x95, 0x66, + 0xa1, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x0d, 0x33, 0x19, 0x61, 0x24, 0xe8, 0xc2, 0x71, + 0xda, 0xae, 0xfc, 0xa6, 0x84, 0x19, 0xf3, 0x62, 0xb5, 0x22, 0xbf, 0x46, 0xc3, 0xa2, 0xab, 0x93, + 0x85, 0x9b, 0xd0, 0xb2, 0x26, 0xaa, 0xd5, 0xb9, 0x2a, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x57, 0x01, + 0x26, 0x33, 0x74, 0x2b, 0x2c, 0x73, 0x5f, 0xe2, 0x91, 0x12, 0x27, 0xea, 0x33, 0xa3, 0x86, 0x17, + 0x8e, 0x10, 0x35, 0xbc, 0xd8, 0x2b, 0x6a, 0xf8, 0xc0, 0x3b, 0x89, 0x1a, 0x6e, 0x8e, 0xd8, 0x60, + 0x5f, 0x23, 0x96, 0x11, 0x69, 0x7c, 0xe8, 0x88, 0x91, 0xc6, 0x8d, 0x41, 0x1f, 0xee, 0x63, 0xd0, + 0x7f, 0xb8, 0x00, 0x53, 0x49, 0x4b, 0xca, 0x13, 0x90, 0xdb, 0xbe, 0x66, 0xc8, 0x6d, 0x2f, 0xf5, + 0xe3, 0x38, 0x9b, 0x2b, 0xc3, 0xc5, 0x09, 0x19, 0xee, 0x07, 0xfb, 0xa2, 0xd6, 0x5d, 0x9e, 0xfb, + 0x77, 0x0a, 0x70, 0x3a, 0xd3, 0x73, 0xf7, 0x04, 0xc6, 0xe6, 0xa6, 0x31, 0x36, 0xcf, 0xf6, 0xed, + 0x54, 0x9c, 0x3b, 0x40, 0x77, 0x12, 0x03, 0x74, 0xb9, 0x7f, 0x92, 0xdd, 0x47, 0xe9, 0xab, 0x45, + 0x38, 0x9f, 0x59, 0x2f, 0x16, 0x7b, 0xae, 0x1a, 0x62, 0xcf, 0xe7, 0x12, 0x62, 0x4f, 0xbb, 0x7b, + 0xed, 0xe3, 0x91, 0x83, 0x0a, 0x77, 0x59, 0x16, 0x13, 0xe1, 0x01, 0x65, 0xa0, 0x86, 0xbb, 0xac, + 0x22, 0x84, 0x4d, 0xba, 0xdf, 0x4c, 0xb2, 0xcf, 0x7f, 0x67, 0xc1, 0xd9, 0xcc, 0xb9, 0x39, 0x01, + 0x59, 0xd7, 0xba, 0x29, 0xeb, 0x7a, 0xaa, 0xef, 0xd5, 0x9a, 0x23, 0xfc, 0xfa, 0xb5, 0x81, 0x9c, + 0x6f, 0x61, 0x2f, 0xf9, 0x9b, 0x30, 0xea, 0xd4, 0xeb, 0x24, 0x0c, 0xd7, 0xfc, 0x86, 0x0a, 0x8c, + 0xfc, 0x2c, 0x7b, 0x67, 0xc5, 0xc5, 0x87, 0x07, 0xf3, 0x73, 0x49, 0x12, 0x31, 0x18, 0xeb, 0x14, + 0xd0, 0xa7, 0x61, 0x24, 0x14, 0xf7, 0xa6, 0x98, 0xfb, 0xe7, 0xfb, 0x1c, 0x1c, 0x67, 0x93, 0x34, + 0xcd, 0xc8, 0x4d, 0x4a, 0x52, 0xa1, 0x48, 0x9a, 0x51, 0x5e, 0x0a, 0xc7, 0x1a, 0xe5, 0xe5, 0x39, + 0x80, 0x3d, 0xf5, 0x18, 0x48, 0xca, 0x1f, 0xb4, 0x67, 0x82, 0x86, 0x85, 0x3e, 
0x0e, 0x53, 0x21, + 0x0f, 0x6d, 0xb8, 0xdc, 0x74, 0x42, 0xe6, 0x2c, 0x23, 0x56, 0x21, 0x8b, 0x0e, 0x55, 0x4b, 0xc0, + 0x70, 0x0a, 0x1b, 0xad, 0xca, 0x56, 0x59, 0x1c, 0x46, 0xbe, 0x30, 0x2f, 0xc6, 0x2d, 0x8a, 0xbc, + 0xc1, 0xa7, 0x92, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0x3e, 0x0d, 0x40, 0x97, 0x8f, 0x90, 0x43, + 0x0c, 0xe7, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0x64, 0xda, 0xf6, 0x32, 0x0f, 0xd7, 0xb2, 0x22, 0x82, + 0x35, 0x82, 0xf6, 0x0f, 0x0f, 0xc0, 0xa3, 0x5d, 0xce, 0x48, 0xb4, 0x68, 0xea, 0x61, 0x9f, 0x4e, + 0x3e, 0xae, 0xe7, 0x32, 0x2b, 0x1b, 0xaf, 0xed, 0xc4, 0x52, 0x2c, 0xbc, 0xe3, 0xa5, 0xf8, 0x03, + 0x96, 0x26, 0xf6, 0xe0, 0x16, 0x9f, 0x1f, 0x3b, 0xe2, 0xd9, 0x7f, 0x8c, 0x72, 0x90, 0xad, 0x0c, + 0x61, 0xc2, 0x73, 0x7d, 0x77, 0xa7, 0x6f, 0xe9, 0xc2, 0xc9, 0x4a, 0x89, 0x7f, 0xcb, 0x82, 0x73, + 0x5d, 0x43, 0x7c, 0x7c, 0x03, 0x32, 0x0c, 0xf6, 0xe7, 0x2d, 0x78, 0x3c, 0xb3, 0x86, 0x61, 0x66, + 0x74, 0x19, 0x4a, 0x75, 0x5a, 0xa8, 0x79, 0x69, 0xc6, 0xee, 0xeb, 0x12, 0x80, 0x63, 0x9c, 0x23, + 0x86, 0x2f, 0xf9, 0x15, 0x0b, 0x52, 0x9b, 0xfe, 0x04, 0x6e, 0x9f, 0x8a, 0x79, 0xfb, 0xbc, 0xbf, + 0x9f, 0xd1, 0xcc, 0xb9, 0x78, 0xfe, 0x78, 0x12, 0xce, 0xe4, 0x78, 0x29, 0xed, 0xc1, 0xf4, 0x76, + 0x9d, 0x98, 0xfe, 0xaf, 0xdd, 0xa2, 0xc8, 0x74, 0x75, 0x96, 0x65, 0x99, 0x4d, 0xa7, 0x53, 0x28, + 0x38, 0xdd, 0x04, 0xfa, 0xbc, 0x05, 0xa7, 0x9c, 0xbb, 0xe1, 0x0a, 0xe5, 0x22, 0xdc, 0xfa, 0x52, + 0xd3, 0xaf, 0xef, 0xd2, 0x23, 0x5a, 0x6e, 0x84, 0x17, 0x32, 0x25, 0x3b, 0x77, 0x6a, 0x29, 0x7c, + 0xa3, 0x79, 0x96, 0xea, 0x35, 0x0b, 0x0b, 0x67, 0xb6, 0x85, 0xb0, 0x88, 0xff, 0x4f, 0xdf, 0x28, + 0x5d, 0x3c, 0xb4, 0xb3, 0xdc, 0xc9, 0xf8, 0xb5, 0x28, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x85, 0xd2, + 0xb6, 0xf4, 0xf1, 0xcc, 0xb8, 0x76, 0xe3, 0x81, 0xec, 0xee, 0xf9, 0xca, 0xd5, 0xb3, 0x0a, 0x09, + 0xc7, 0x44, 0xd1, 0xab, 0x50, 0xf4, 0xb6, 0xc2, 0x6e, 0xd9, 0x52, 0x13, 0x76, 0x78, 0x3c, 0x0e, + 0xc2, 0xfa, 0x6a, 0x0d, 0xd3, 0x8a, 0xe8, 0x1a, 0x14, 0x83, 0xcd, 0x86, 0x10, 0x4b, 0x66, 0x6e, + 0x52, 0xbc, 0x54, 0xce, 0xe9, 0x15, 0xa3, 0x84, 0x97, 0xca, 0x98, 0x92, 0x40, 0x55, 0x18, 0x64, + 0xae, 0x3d, 0xe2, 0x92, 0xcb, 0x64, 0xe7, 0xbb, 0xb8, 0xc8, 0xf1, 0x60, 0x09, 0x0c, 0x01, 0x73, + 0x42, 0x68, 0x03, 0x86, 0xea, 0x2c, 0xb3, 0xa6, 0x88, 0x1b, 0xf7, 0xa1, 0x4c, 0x01, 0x64, 0x97, + 0x94, 0xa3, 0x42, 0x1e, 0xc7, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xf6, 0xce, 0x56, 0x28, 0x32, + 0x41, 0x67, 0x53, 0xed, 0x92, 0x49, 0x57, 0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0x2f, 0x43, 0x61, + 0xab, 0x2e, 0xdc, 0x76, 0x32, 0x25, 0x91, 0x66, 0x28, 0x8b, 0xa5, 0xa1, 0xfb, 0x07, 0xf3, 0x85, + 0xd5, 0x65, 0x5c, 0xd8, 0xaa, 0xa3, 0x75, 0x18, 0xde, 0xe2, 0xce, 0xef, 0x42, 0xd8, 0xf8, 0x64, + 0xb6, 0x5f, 0x7e, 0xca, 0x3f, 0x9e, 0x7b, 0xac, 0x08, 0x00, 0x96, 0x44, 0x58, 0x38, 0x7d, 0xe5, + 0xc4, 0x2f, 0x42, 0xac, 0x2d, 0x1c, 0x2d, 0xf0, 0x02, 0x67, 0x3a, 0xe2, 0x50, 0x00, 0x58, 0xa3, + 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xf1, 0x8b, 0x60, 0x33, 0x99, 0xab, 0x5a, 0xe5, 0xec, 0xef, 0xb6, + 0xaa, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0x5d, 0x18, 0xdf, 0x0b, 0xdb, 0x3b, 0x44, 0x6e, 0x69, 0x16, + 0x7b, 0x26, 0xe7, 0x5e, 0xbe, 0x2d, 0x10, 0xdd, 0x20, 0xea, 0x38, 0xcd, 0xd4, 0x29, 0xc4, 0x74, + 0xfa, 0xb7, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x87, 0xff, 0xad, 0x8e, 0xbf, 0xb9, 0x1f, 0x11, 0x11, + 0x19, 0x2d, 0x73, 0xf8, 0x5f, 0xe7, 0x28, 0xe9, 0xe1, 0x17, 0x00, 0x2c, 0x89, 0xa0, 0xdb, 0x62, + 0x78, 0xd8, 0xe9, 0x39, 0x95, 0x1f, 0xbe, 0x74, 0x51, 0x22, 0xe5, 0x0c, 0x0a, 0x3b, 0x2d, 0x63, + 0x52, 0xec, 0x94, 0x6c, 0xef, 0xf8, 0x91, 0xef, 0x25, 0x4e, 0xe8, 0xe9, 0xfc, 0x53, 0xb2, 0x9a, + 0x81, 
0x9f, 0x3e, 0x25, 0xb3, 0xb0, 0x70, 0x66, 0x5b, 0xa8, 0x01, 0x13, 0x6d, 0x3f, 0x88, 0xee, + 0xfa, 0x81, 0x5c, 0x5f, 0xa8, 0x8b, 0xb0, 0xc4, 0xc0, 0x14, 0x2d, 0xb2, 0xa0, 0x83, 0x26, 0x04, + 0x27, 0x68, 0xa2, 0x4f, 0xc0, 0x70, 0x58, 0x77, 0x9a, 0xa4, 0x72, 0x73, 0x76, 0x26, 0xff, 0xfa, + 0xa9, 0x71, 0x94, 0x9c, 0xd5, 0xc5, 0x63, 0xef, 0x73, 0x14, 0x2c, 0xc9, 0xa1, 0x55, 0x18, 0x64, + 0xe9, 0xd2, 0x58, 0x18, 0xbf, 0x9c, 0x28, 0xac, 0x29, 0xab, 0x68, 0x7e, 0x36, 0xb1, 0x62, 0xcc, + 0xab, 0xd3, 0x3d, 0x20, 0xde, 0x0c, 0x7e, 0x38, 0x7b, 0x3a, 0x7f, 0x0f, 0x88, 0xa7, 0xc6, 0xcd, + 0x5a, 0xb7, 0x3d, 0xa0, 0x90, 0x70, 0x4c, 0x94, 0x9e, 0xcc, 0xf4, 0x34, 0x3d, 0xd3, 0xc5, 0x9c, + 0x27, 0xf7, 0x2c, 0x65, 0x27, 0x33, 0x3d, 0x49, 0x29, 0x09, 0xfb, 0xf7, 0x86, 0xd3, 0x3c, 0x0b, + 0x7b, 0x65, 0x7e, 0x97, 0x95, 0x52, 0x40, 0x7e, 0xb8, 0x5f, 0xa1, 0xd7, 0x31, 0xb2, 0xe0, 0x9f, + 0xb7, 0xe0, 0x4c, 0x3b, 0xf3, 0x43, 0x04, 0x03, 0xd0, 0x9f, 0xec, 0x8c, 0x7f, 0xba, 0x0a, 0xf9, + 0x98, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x9f, 0x39, 0xc5, 0x77, 0xfc, 0xcc, 0x59, 0x83, 0x11, 0xc6, + 0x64, 0xf6, 0xc8, 0x34, 0x9d, 0x7c, 0xed, 0x31, 0x56, 0x62, 0x59, 0x54, 0xc4, 0x8a, 0x04, 0xfa, + 0x41, 0x0b, 0xce, 0x25, 0xbb, 0x8e, 0x09, 0x03, 0x8b, 0x38, 0x91, 0xfc, 0x81, 0xbb, 0x2a, 0xbe, + 0x3f, 0xc5, 0xff, 0x1b, 0xc8, 0x87, 0xbd, 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x39, 0xe3, 0x85, 0x3d, + 0x64, 0x6a, 0x15, 0xfa, 0x78, 0x65, 0xbf, 0x00, 0x63, 0x2d, 0xbf, 0xe3, 0x45, 0xc2, 0xfa, 0x47, + 0x58, 0x22, 0x30, 0x0d, 0xfc, 0x9a, 0x56, 0x8e, 0x0d, 0xac, 0xc4, 0xdb, 0x7c, 0xe4, 0x81, 0xdf, + 0xe6, 0x6f, 0xc0, 0x98, 0xa7, 0x99, 0xab, 0x0a, 0x7e, 0xe0, 0x62, 0x7e, 0x8c, 0x57, 0xdd, 0xb8, + 0x95, 0xf7, 0x52, 0x2f, 0xc1, 0x06, 0xb5, 0x93, 0x7d, 0xf0, 0x7d, 0xc9, 0xca, 0x60, 0xea, 0xb9, + 0x08, 0xe0, 0xa3, 0xa6, 0x08, 0xe0, 0x62, 0x52, 0x04, 0x90, 0x92, 0x28, 0x1b, 0xaf, 0xff, 0xfe, + 0x53, 0xd8, 0xf4, 0x1b, 0x08, 0xd1, 0x6e, 0xc2, 0x85, 0x5e, 0xd7, 0x12, 0x33, 0x03, 0x6b, 0x28, + 0xfd, 0x61, 0x6c, 0x06, 0xd6, 0xa8, 0x94, 0x31, 0x83, 0xf4, 0x1b, 0x62, 0xc7, 0xfe, 0x1f, 0x16, + 0x14, 0xab, 0x7e, 0xe3, 0x04, 0x1e, 0xbc, 0x1f, 0x33, 0x1e, 0xbc, 0x8f, 0x66, 0x5f, 0x88, 0x8d, + 0x5c, 0x79, 0xf8, 0x4a, 0x42, 0x1e, 0x7e, 0x2e, 0x8f, 0x40, 0x77, 0xe9, 0xf7, 0x4f, 0x16, 0x61, + 0xb4, 0xea, 0x37, 0x94, 0x0d, 0xf6, 0xaf, 0x3d, 0x88, 0x0d, 0x76, 0x6e, 0x22, 0x06, 0x8d, 0x32, + 0xb3, 0x1e, 0x93, 0xee, 0xa7, 0xdf, 0x60, 0xa6, 0xd8, 0x77, 0x88, 0xbb, 0xbd, 0x13, 0x91, 0x46, + 0xf2, 0x73, 0x4e, 0xce, 0x14, 0xfb, 0xbf, 0x5b, 0x30, 0x99, 0x68, 0x1d, 0x35, 0x61, 0xbc, 0xa9, + 0x4b, 0x5b, 0xc5, 0x3a, 0x7d, 0x20, 0x41, 0xad, 0x30, 0x65, 0xd5, 0x8a, 0xb0, 0x49, 0x1c, 0x2d, + 0x00, 0x28, 0xf5, 0xa3, 0x14, 0xeb, 0x31, 0xae, 0x5f, 0xe9, 0x27, 0x43, 0xac, 0x61, 0xa0, 0x17, + 0x61, 0x34, 0xf2, 0xdb, 0x7e, 0xd3, 0xdf, 0xde, 0xbf, 0x4e, 0x64, 0x50, 0x27, 0x65, 0xa0, 0xb6, + 0x11, 0x83, 0xb0, 0x8e, 0x67, 0xff, 0x74, 0x91, 0x7f, 0xa8, 0x17, 0xb9, 0xef, 0xad, 0xc9, 0x77, + 0xf7, 0x9a, 0xfc, 0xaa, 0x05, 0x53, 0xb4, 0x75, 0x66, 0x03, 0x23, 0x2f, 0x5b, 0x15, 0xdb, 0xd9, + 0xea, 0x12, 0xdb, 0xf9, 0x22, 0x3d, 0xbb, 0x1a, 0x7e, 0x27, 0x12, 0x12, 0x34, 0xed, 0x70, 0xa2, + 0xa5, 0x58, 0x40, 0x05, 0x1e, 0x09, 0x02, 0xe1, 0xb7, 0xa7, 0xe3, 0x91, 0x20, 0xc0, 0x02, 0x2a, + 0x43, 0x3f, 0x0f, 0x64, 0x87, 0x7e, 0xe6, 0x21, 0x2a, 0x85, 0xb5, 0x84, 0x60, 0x7b, 0xb4, 0x10, + 0x95, 0xd2, 0x8c, 0x22, 0xc6, 0xb1, 0x7f, 0xb6, 0x08, 0x63, 0x55, 0xbf, 0x11, 0x2b, 0x00, 0x5f, + 0x30, 0x14, 0x80, 0x17, 0x12, 0x0a, 0xc0, 0x29, 0x1d, 0xf7, 0x3d, 0x75, 0xdf, 0xd7, 0x4b, 0xdd, + 0xf7, 0xcf, 0x2d, 0x36, 0x6b, 
0xe5, 0xf5, 0x1a, 0x37, 0xa9, 0x42, 0x57, 0x60, 0x94, 0x1d, 0x48, + 0xcc, 0x51, 0x54, 0x6a, 0xc5, 0x58, 0x4a, 0xa3, 0xf5, 0xb8, 0x18, 0xeb, 0x38, 0xe8, 0x12, 0x8c, + 0x84, 0xc4, 0x09, 0xea, 0x3b, 0xea, 0x8c, 0x13, 0x2a, 0x2c, 0x5e, 0x86, 0x15, 0x14, 0xbd, 0x1e, + 0x47, 0x47, 0x2c, 0xe6, 0x3b, 0x9e, 0xe9, 0xfd, 0xe1, 0x5b, 0x24, 0x3f, 0x24, 0xa2, 0x7d, 0x07, + 0x50, 0x1a, 0xbf, 0x8f, 0xb0, 0x60, 0xf3, 0x66, 0x58, 0xb0, 0x52, 0x2a, 0x24, 0xd8, 0x9f, 0x5b, + 0x30, 0x51, 0xf5, 0x1b, 0x74, 0xeb, 0x7e, 0x33, 0xed, 0x53, 0x3d, 0x34, 0xec, 0x50, 0x97, 0xd0, + 0xb0, 0x4f, 0xc0, 0x60, 0xd5, 0x6f, 0x54, 0xaa, 0xdd, 0xbc, 0xbe, 0xed, 0xbf, 0x6b, 0xc1, 0x70, + 0xd5, 0x6f, 0x9c, 0x80, 0x70, 0xfe, 0xa3, 0xa6, 0x70, 0xfe, 0x91, 0x9c, 0x75, 0x93, 0x23, 0x8f, + 0xff, 0xdb, 0x03, 0x30, 0x4e, 0xfb, 0xe9, 0x6f, 0xcb, 0xa9, 0x34, 0x86, 0xcd, 0xea, 0x63, 0xd8, + 0x28, 0x2f, 0xec, 0x37, 0x9b, 0xfe, 0xdd, 0xe4, 0xb4, 0xae, 0xb2, 0x52, 0x2c, 0xa0, 0xe8, 0x19, + 0x18, 0x69, 0x07, 0x64, 0xcf, 0xf5, 0x05, 0x93, 0xa9, 0xa9, 0x3a, 0xaa, 0xa2, 0x1c, 0x2b, 0x0c, + 0xfa, 0x38, 0x0b, 0x5d, 0xaf, 0x4e, 0x6a, 0xa4, 0xee, 0x7b, 0x0d, 0x2e, 0xbf, 0x2e, 0x8a, 0xf4, + 0x0e, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x1d, 0x28, 0xb1, 0xff, 0xec, 0xd8, 0x39, 0x7a, 0xa2, 0x50, + 0x91, 0x38, 0x4e, 0x10, 0xc0, 0x31, 0x2d, 0xf4, 0x1c, 0x40, 0x24, 0x63, 0x80, 0x87, 0x22, 0x04, + 0x94, 0x62, 0xc8, 0x55, 0x74, 0xf0, 0x10, 0x6b, 0x58, 0xe8, 0x69, 0x28, 0x45, 0x8e, 0xdb, 0xbc, + 0xe1, 0x7a, 0x24, 0x64, 0x72, 0xe9, 0xa2, 0xcc, 0xdf, 0x26, 0x0a, 0x71, 0x0c, 0xa7, 0x0c, 0x11, + 0x8b, 0x8f, 0xc0, 0xd3, 0x0c, 0x8f, 0x30, 0x6c, 0xc6, 0x10, 0xdd, 0x50, 0xa5, 0x58, 0xc3, 0x40, + 0x3b, 0xf0, 0x98, 0xeb, 0xb1, 0x54, 0x08, 0xa4, 0xb6, 0xeb, 0xb6, 0x37, 0x6e, 0xd4, 0x6e, 0x93, + 0xc0, 0xdd, 0xda, 0x5f, 0x72, 0xea, 0xbb, 0xc4, 0x93, 0x29, 0x20, 0xdf, 0x2f, 0xba, 0xf8, 0x58, + 0xa5, 0x0b, 0x2e, 0xee, 0x4a, 0xc9, 0x7e, 0x09, 0x4e, 0x57, 0xfd, 0x46, 0xd5, 0x0f, 0xa2, 0x55, + 0x3f, 0xb8, 0xeb, 0x04, 0x0d, 0xb9, 0x52, 0xe6, 0x65, 0xac, 0x02, 0x7a, 0x14, 0x0e, 0xf2, 0x83, + 0xc2, 0x88, 0x43, 0xf0, 0x3c, 0x63, 0xbe, 0x8e, 0xe8, 0x61, 0x53, 0x67, 0x6c, 0x80, 0xca, 0x0b, + 0x72, 0xd5, 0x89, 0x08, 0xba, 0xc9, 0xf2, 0x1d, 0xc7, 0x37, 0xa2, 0xa8, 0xfe, 0x94, 0x96, 0xef, + 0x38, 0x06, 0x66, 0x5e, 0xa1, 0x66, 0x7d, 0xfb, 0x7f, 0x0e, 0xb2, 0xc3, 0x31, 0x91, 0x5b, 0x02, + 0x7d, 0x06, 0x26, 0x42, 0x72, 0xc3, 0xf5, 0x3a, 0xf7, 0xa4, 0x4c, 0xa0, 0x8b, 0x8f, 0x54, 0x6d, + 0x45, 0xc7, 0xe4, 0x92, 0x45, 0xb3, 0x0c, 0x27, 0xa8, 0xa1, 0x16, 0x4c, 0xdc, 0x75, 0xbd, 0x86, + 0x7f, 0x37, 0x94, 0xf4, 0x47, 0xf2, 0x05, 0x8c, 0x77, 0x38, 0x66, 0xa2, 0x8f, 0x46, 0x73, 0x77, + 0x0c, 0x62, 0x38, 0x41, 0x9c, 0x2e, 0xc0, 0xa0, 0xe3, 0x2d, 0x86, 0xb7, 0x42, 0x12, 0x88, 0xcc, + 0xd5, 0x6c, 0x01, 0x62, 0x59, 0x88, 0x63, 0x38, 0x5d, 0x80, 0xec, 0xcf, 0xd5, 0xc0, 0xef, 0xf0, + 0x48, 0xfd, 0x62, 0x01, 0x62, 0x55, 0x8a, 0x35, 0x0c, 0xba, 0x41, 0xd9, 0xbf, 0x75, 0xdf, 0xc3, + 0xbe, 0x1f, 0xc9, 0x2d, 0xcd, 0x72, 0xa5, 0x6a, 0xe5, 0xd8, 0xc0, 0x42, 0xab, 0x80, 0xc2, 0x4e, + 0xbb, 0xdd, 0x64, 0xc6, 0x17, 0x4e, 0x93, 0x91, 0xe2, 0x8a, 0xef, 0x22, 0x0f, 0x60, 0x5a, 0x4b, + 0x41, 0x71, 0x46, 0x0d, 0x7a, 0x56, 0x6f, 0x89, 0xae, 0x0e, 0xb2, 0xae, 0x72, 0x65, 0x44, 0x8d, + 0xf7, 0x53, 0xc2, 0xd0, 0x0a, 0x0c, 0x87, 0xfb, 0x61, 0x3d, 0x12, 0x91, 0xd8, 0x72, 0xd2, 0x07, + 0xd5, 0x18, 0x8a, 0x96, 0xbd, 0x8e, 0x57, 0xc1, 0xb2, 0x2e, 0xaa, 0xc3, 0x8c, 0xa0, 0xb8, 0xbc, + 0xe3, 0x78, 0x2a, 0x19, 0x0b, 0xb7, 0x41, 0xbd, 0x72, 0xff, 0x60, 0x7e, 0x46, 0xb4, 0xac, 0x83, + 0x0f, 0x0f, 0xe6, 0xcf, 0x54, 0xfd, 0x46, 0x06, 0x04, 
0x67, 0x51, 0xe3, 0x8b, 0xaf, 0x5e, 0xf7, + 0x5b, 0xed, 0x6a, 0xe0, 0x6f, 0xb9, 0x4d, 0xd2, 0x4d, 0xa1, 0x53, 0x33, 0x30, 0xc5, 0xe2, 0x33, + 0xca, 0x70, 0x82, 0x9a, 0xfd, 0xed, 0x8c, 0x9f, 0x61, 0xc9, 0x9a, 0xa3, 0x4e, 0x40, 0x50, 0x0b, + 0xc6, 0xdb, 0x6c, 0x9b, 0x88, 0xf8, 0xf9, 0x62, 0xad, 0xbf, 0xd0, 0xa7, 0x60, 0xe2, 0x2e, 0xbd, + 0x06, 0x94, 0xe0, 0x90, 0xbd, 0xf8, 0xaa, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0x7f, 0xec, 0x11, 0x76, + 0x23, 0xd6, 0xb8, 0xb4, 0x61, 0x58, 0x98, 0xbc, 0x8b, 0xa7, 0xd5, 0x5c, 0xbe, 0xd8, 0x2b, 0x9e, + 0x16, 0x61, 0x36, 0x8f, 0x65, 0x5d, 0xf4, 0x69, 0x98, 0xa0, 0x2f, 0x15, 0x2d, 0x0b, 0xca, 0xa9, + 0xfc, 0xd0, 0x04, 0x71, 0xf2, 0x13, 0x2d, 0xb7, 0x86, 0x5e, 0x19, 0x27, 0x88, 0xa1, 0xd7, 0x99, + 0x71, 0x86, 0x99, 0x60, 0xa5, 0x07, 0x69, 0xdd, 0x0e, 0x43, 0x92, 0xd5, 0x88, 0xe4, 0x25, 0x6f, + 0xb1, 0x1f, 0x6e, 0xf2, 0x16, 0x74, 0x03, 0xc6, 0x45, 0xc6, 0x62, 0xb1, 0x72, 0x8b, 0x86, 0x34, + 0x6e, 0x1c, 0xeb, 0xc0, 0xc3, 0x64, 0x01, 0x36, 0x2b, 0xa3, 0x6d, 0x38, 0xa7, 0x65, 0x10, 0xba, + 0x1a, 0x38, 0x4c, 0xa5, 0xee, 0xb2, 0xe3, 0x54, 0xbb, 0xab, 0x1f, 0xbf, 0x7f, 0x30, 0x7f, 0x6e, + 0xa3, 0x1b, 0x22, 0xee, 0x4e, 0x07, 0xdd, 0x84, 0xd3, 0xdc, 0xb1, 0xb6, 0x4c, 0x9c, 0x46, 0xd3, + 0xf5, 0x14, 0x33, 0xc0, 0xb7, 0xfc, 0xd9, 0xfb, 0x07, 0xf3, 0xa7, 0x17, 0xb3, 0x10, 0x70, 0x76, + 0x3d, 0xf4, 0x51, 0x28, 0x35, 0xbc, 0x50, 0x8c, 0xc1, 0x90, 0x91, 0xa4, 0xa9, 0x54, 0x5e, 0xaf, + 0xa9, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x02, 0xda, 0xe6, 0x12, 0x5b, 0x25, 0x20, 0x19, 0x4e, 0x05, + 0x16, 0x4a, 0x8a, 0xda, 0x0c, 0xd7, 0x3a, 0xae, 0xaa, 0x50, 0x16, 0xe7, 0x86, 0xd7, 0x9d, 0x41, + 0x18, 0xbd, 0x06, 0x88, 0xbe, 0x20, 0xdc, 0x3a, 0x59, 0xac, 0xb3, 0xe4, 0x0c, 0x4c, 0xc0, 0x3d, + 0x62, 0x3a, 0x7b, 0xd5, 0x52, 0x18, 0x38, 0xa3, 0x16, 0xba, 0x46, 0x4f, 0x15, 0xbd, 0x54, 0x9c, + 0x5a, 0x2a, 0xa5, 0x5e, 0x99, 0xb4, 0x03, 0x52, 0x77, 0x22, 0xd2, 0x30, 0x29, 0xe2, 0x44, 0x3d, + 0xd4, 0x80, 0xc7, 0x9c, 0x4e, 0xe4, 0x33, 0x61, 0xb8, 0x89, 0xba, 0xe1, 0xef, 0x12, 0x8f, 0xe9, + 0xa1, 0x46, 0x96, 0x2e, 0x50, 0x6e, 0x63, 0xb1, 0x0b, 0x1e, 0xee, 0x4a, 0x85, 0x72, 0x89, 0x2a, + 0x87, 0x2e, 0x98, 0xe1, 0x92, 0x32, 0xf2, 0xe8, 0xbe, 0x08, 0xa3, 0x3b, 0x7e, 0x18, 0xad, 0x93, + 0xe8, 0xae, 0x1f, 0xec, 0x8a, 0xa8, 0x97, 0x71, 0xa4, 0xe4, 0x18, 0x84, 0x75, 0x3c, 0xfa, 0x0c, + 0x64, 0x56, 0x12, 0x95, 0x32, 0x53, 0x50, 0x8f, 0xc4, 0x67, 0xcc, 0x35, 0x5e, 0x8c, 0x25, 0x5c, + 0xa2, 0x56, 0xaa, 0xcb, 0x4c, 0xd9, 0x9c, 0x40, 0xad, 0x54, 0x97, 0xb1, 0x84, 0xd3, 0xe5, 0x1a, + 0xee, 0x38, 0x01, 0xa9, 0x06, 0x7e, 0x9d, 0x84, 0x5a, 0x7c, 0xee, 0x47, 0x79, 0x4c, 0x4f, 0xba, + 0x5c, 0x6b, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x22, 0xe9, 0xec, 0x59, 0x13, 0xf9, 0x5a, 0x82, 0x34, + 0x3f, 0xd3, 0x67, 0x02, 0x2d, 0x0f, 0xa6, 0x54, 0xde, 0x2e, 0x1e, 0xc5, 0x33, 0x9c, 0x9d, 0x64, + 0x6b, 0xbb, 0xff, 0x10, 0xa0, 0x4a, 0xef, 0x52, 0x49, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xac, + 0xa9, 0x9e, 0x11, 0xb1, 0x2e, 0x43, 0x29, 0xec, 0x6c, 0x36, 0xfc, 0x96, 0xe3, 0x7a, 0x4c, 0xd9, + 0xac, 0xbd, 0x47, 0x6a, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x0a, 0x23, 0x8e, 0x54, 0xaa, 0xa0, 0xfc, + 0x18, 0x28, 0x4a, 0x95, 0xc2, 0xc3, 0x02, 0x48, 0x35, 0x8a, 0xaa, 0x8b, 0x5e, 0x81, 0x71, 0xe1, + 0x18, 0x2a, 0x52, 0x46, 0xce, 0x98, 0xde, 0x3b, 0x35, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0b, 0x46, + 0x23, 0xbf, 0xc9, 0x5c, 0x50, 0x28, 0x9b, 0x77, 0x26, 0x3f, 0x9a, 0xd7, 0x86, 0x42, 0xd3, 0xe5, + 0x99, 0xaa, 0x2a, 0xd6, 0xe9, 0xa0, 0x0d, 0xbe, 0xde, 0x59, 0x9c, 0x6a, 0x12, 0xce, 0x3e, 0x92, + 0x7f, 0x27, 0xa9, 0x70, 0xd6, 0xe6, 0x76, 0x10, 0x35, 0xb1, 0x4e, 0x06, 0x5d, 
0x85, 0xe9, 0x76, + 0xe0, 0xfa, 0x6c, 0x4d, 0x28, 0x7d, 0xda, 0xac, 0x99, 0x24, 0xa7, 0x9a, 0x44, 0xc0, 0xe9, 0x3a, + 0xcc, 0xaf, 0x57, 0x14, 0xce, 0x9e, 0xe5, 0x59, 0xa5, 0xf9, 0xf3, 0x8e, 0x97, 0x61, 0x05, 0x45, + 0x6b, 0xec, 0x24, 0xe6, 0x92, 0x89, 0xd9, 0xb9, 0xfc, 0xb0, 0x2b, 0xba, 0x04, 0x83, 0x33, 0xaf, + 0xea, 0x2f, 0x8e, 0x29, 0xa0, 0x86, 0x96, 0x7e, 0x90, 0xbe, 0x18, 0xc2, 0xd9, 0xc7, 0xba, 0x98, + 0xaa, 0x25, 0x9e, 0x17, 0x31, 0x43, 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0x7d, 0x1c, 0xa6, 0x44, + 0xb0, 0xb8, 0x78, 0x98, 0xce, 0xc5, 0x86, 0xbd, 0x38, 0x01, 0xc3, 0x29, 0x6c, 0x1e, 0xbf, 0xdf, + 0xd9, 0x6c, 0x12, 0x71, 0xf4, 0xdd, 0x70, 0xbd, 0xdd, 0x70, 0xf6, 0x3c, 0x3b, 0x1f, 0x44, 0xfc, + 0xfe, 0x24, 0x14, 0x67, 0xd4, 0x40, 0x1b, 0x30, 0xd5, 0x0e, 0x08, 0x69, 0x31, 0x46, 0x5f, 0xdc, + 0x67, 0xf3, 0xdc, 0xad, 0x9d, 0xf6, 0xa4, 0x9a, 0x80, 0x1d, 0x66, 0x94, 0xe1, 0x14, 0x05, 0x74, + 0x17, 0x46, 0xfc, 0x3d, 0x12, 0xec, 0x10, 0xa7, 0x31, 0x7b, 0xa1, 0x8b, 0xa1, 0xb9, 0xb8, 0xdc, + 0x6e, 0x0a, 0xdc, 0x84, 0x0e, 0x5e, 0x16, 0xf7, 0xd6, 0xc1, 0xcb, 0xc6, 0xd0, 0x0f, 0x59, 0x70, + 0x56, 0x8a, 0xed, 0x6b, 0x6d, 0x3a, 0xea, 0xcb, 0xbe, 0x17, 0x46, 0x01, 0x77, 0xc4, 0x7e, 0x3c, + 0xdf, 0x39, 0x79, 0x23, 0xa7, 0x92, 0x12, 0x8e, 0x9e, 0xcd, 0xc3, 0x08, 0x71, 0x7e, 0x8b, 0x68, + 0x19, 0xa6, 0x43, 0x12, 0xc9, 0xc3, 0x68, 0x31, 0x5c, 0x7d, 0xbd, 0xbc, 0x3e, 0xfb, 0x04, 0xf7, + 0x22, 0xa7, 0x9b, 0xa1, 0x96, 0x04, 0xe2, 0x34, 0xfe, 0xdc, 0xb7, 0xc2, 0x74, 0xea, 0xfa, 0x3f, + 0x4a, 0x5e, 0x92, 0xb9, 0x5d, 0x18, 0x37, 0x86, 0xf8, 0xa1, 0xea, 0x70, 0xff, 0xcd, 0x30, 0x94, + 0x94, 0x7e, 0x0f, 0x5d, 0x36, 0xd5, 0xb6, 0x67, 0x93, 0x6a, 0xdb, 0x11, 0xfa, 0xae, 0xd7, 0x35, + 0xb5, 0x1b, 0x19, 0xb1, 0xb3, 0xf2, 0x36, 0x74, 0xff, 0x4e, 0xd1, 0x9a, 0xb8, 0xb6, 0xd8, 0xb7, + 0xfe, 0x77, 0xa0, 0xab, 0x04, 0xf8, 0x2a, 0x4c, 0x7b, 0x3e, 0xe3, 0x39, 0x49, 0x43, 0x32, 0x14, + 0x8c, 0x6f, 0x28, 0xe9, 0xc1, 0x28, 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x2f, 0xfe, 0xa4, + 0xc8, 0x99, 0xf3, 0x05, 0x58, 0x40, 0xd1, 0x13, 0x30, 0xd8, 0xf6, 0x1b, 0x95, 0xaa, 0xe0, 0x37, + 0xb5, 0x88, 0x8d, 0x8d, 0x4a, 0x15, 0x73, 0x18, 0x5a, 0x84, 0x21, 0xf6, 0x23, 0x9c, 0x1d, 0xcb, + 0x8f, 0x3a, 0xc0, 0x6a, 0x68, 0x59, 0x5f, 0x58, 0x05, 0x2c, 0x2a, 0x32, 0xd1, 0x17, 0x65, 0xd2, + 0x99, 0xe8, 0x6b, 0xf8, 0x01, 0x45, 0x5f, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x7b, 0x70, 0xda, 0x78, + 0x18, 0xf1, 0x25, 0x42, 0x42, 0xe1, 0xf9, 0xfc, 0x44, 0xd7, 0x17, 0x91, 0xd0, 0x17, 0x9f, 0x13, + 0x9d, 0x3e, 0x5d, 0xc9, 0xa2, 0x84, 0xb3, 0x1b, 0x40, 0x4d, 0x98, 0xae, 0xa7, 0x5a, 0x1d, 0xe9, + 0xbf, 0x55, 0x35, 0xa1, 0xe9, 0x16, 0xd3, 0x84, 0xd1, 0x2b, 0x30, 0xf2, 0x96, 0x1f, 0xb2, 0xb3, + 0x5a, 0xf0, 0xc8, 0xd2, 0x6d, 0x76, 0xe4, 0xf5, 0x9b, 0x35, 0x56, 0x7e, 0x78, 0x30, 0x3f, 0x5a, + 0xf5, 0x1b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0xf7, 0x5a, 0x30, 0x97, 0x7e, 0x79, 0xa9, 0x4e, 0x8f, + 0xf7, 0xdf, 0x69, 0x5b, 0x34, 0x3a, 0xb7, 0x92, 0x4b, 0x0e, 0x77, 0x69, 0xca, 0xfe, 0x25, 0xae, + 0xdb, 0x15, 0x1a, 0x20, 0x12, 0x76, 0x9a, 0x27, 0x91, 0xec, 0x72, 0xc5, 0x50, 0x4e, 0x3d, 0xb0, + 0xfd, 0xc0, 0xbf, 0xb4, 0x98, 0xfd, 0xc0, 0x09, 0x3a, 0x0a, 0xbc, 0x0e, 0x23, 0x91, 0x4c, 0x59, + 0xda, 0x25, 0x3f, 0xa7, 0xd6, 0x29, 0x66, 0x43, 0xa1, 0x38, 0x56, 0x95, 0x9d, 0x54, 0x91, 0xb1, + 0xff, 0x09, 0x9f, 0x01, 0x09, 0x39, 0x01, 0x1d, 0x40, 0xd9, 0xd4, 0x01, 0xcc, 0xf7, 0xf8, 0x82, + 0x1c, 0x5d, 0xc0, 0x3f, 0x36, 0xfb, 0xcd, 0x24, 0x35, 0xef, 0x76, 0xc3, 0x15, 0xfb, 0x0b, 0x16, + 0x40, 0x1c, 0x10, 0x97, 0xc9, 0x97, 0xfd, 0x40, 0x66, 0x3a, 0xcc, 0xca, 0xe9, 0xf3, 0x12, 0xe5, + 0x51, 
0xfd, 0xc8, 0xaf, 0xfb, 0x4d, 0xa1, 0xe1, 0x7a, 0x2c, 0x56, 0x43, 0xf0, 0xf2, 0x43, 0xed, + 0x37, 0x56, 0xd8, 0x68, 0x5e, 0x86, 0xdf, 0x2a, 0xc6, 0x8a, 0x31, 0x23, 0xf4, 0xd6, 0x8f, 0x58, + 0x70, 0x2a, 0xcb, 0xea, 0x94, 0xbe, 0x78, 0xb8, 0xcc, 0x4a, 0x19, 0x15, 0xa9, 0xd9, 0xbc, 0x2d, + 0xca, 0xb1, 0xc2, 0xe8, 0x3b, 0x7f, 0xd7, 0xd1, 0x22, 0xd1, 0xde, 0x84, 0xf1, 0x6a, 0x40, 0xb4, + 0xcb, 0xf5, 0x55, 0xee, 0xd2, 0xcd, 0xfb, 0xf3, 0xcc, 0x91, 0xdd, 0xb9, 0xed, 0x2f, 0x17, 0xe0, + 0x14, 0xb7, 0x0a, 0x58, 0xdc, 0xf3, 0xdd, 0x46, 0xd5, 0x6f, 0x88, 0xdc, 0x6b, 0x9f, 0x82, 0xb1, + 0xb6, 0x26, 0x68, 0xec, 0x16, 0x55, 0x51, 0x17, 0x48, 0xc6, 0xa2, 0x11, 0xbd, 0x14, 0x1b, 0xb4, + 0x50, 0x03, 0xc6, 0xc8, 0x9e, 0x5b, 0x57, 0xaa, 0xe5, 0xc2, 0x91, 0x2f, 0x3a, 0xd5, 0xca, 0x8a, + 0x46, 0x07, 0x1b, 0x54, 0x1f, 0x42, 0x56, 0x5d, 0xfb, 0x47, 0x2d, 0x78, 0x24, 0x27, 0x06, 0x23, + 0x6d, 0xee, 0x2e, 0xb3, 0xbf, 0x10, 0xcb, 0x56, 0x35, 0xc7, 0xad, 0x32, 0xb0, 0x80, 0xa2, 0x4f, + 0x00, 0x70, 0xab, 0x0a, 0xfa, 0xe4, 0xee, 0x15, 0xac, 0xce, 0x88, 0xb3, 0xa5, 0x85, 0x4c, 0x92, + 0xf5, 0xb1, 0x46, 0xcb, 0xfe, 0xa9, 0x22, 0x0c, 0xf2, 0x04, 0xe9, 0xab, 0x30, 0xbc, 0xc3, 0x33, + 0x52, 0xf4, 0x93, 0xfc, 0x22, 0x16, 0x86, 0xf0, 0x02, 0x2c, 0x2b, 0xa3, 0x35, 0x98, 0xe1, 0x19, + 0x3d, 0x9a, 0x65, 0xd2, 0x74, 0xf6, 0xa5, 0xe4, 0x8e, 0x67, 0xc3, 0x54, 0x12, 0xcc, 0x4a, 0x1a, + 0x05, 0x67, 0xd5, 0x43, 0xaf, 0xc2, 0x04, 0x7d, 0x49, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb9, 0x3c, + 0xd4, 0xd3, 0x6d, 0xc3, 0x80, 0xe2, 0x04, 0x36, 0x7d, 0xcc, 0xb7, 0x53, 0x32, 0xca, 0xc1, 0xf8, + 0x31, 0x6f, 0xca, 0x25, 0x4d, 0x5c, 0x66, 0x6e, 0xda, 0x61, 0xc6, 0xb5, 0x1b, 0x3b, 0x01, 0x09, + 0x77, 0xfc, 0x66, 0x83, 0x31, 0x7d, 0x83, 0x9a, 0xb9, 0x69, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, + 0x2d, 0xc7, 0x6d, 0x76, 0x02, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x26, 0xe0, 0x38, 0x55, 0x83, + 0xae, 0xa3, 0xd3, 0xd5, 0xc0, 0xa7, 0x07, 0xa9, 0x0c, 0x2c, 0xa3, 0x6c, 0x88, 0x87, 0xa5, 0x0f, + 0x6c, 0x97, 0x10, 0x6c, 0xc2, 0xca, 0x92, 0x53, 0x30, 0x0c, 0x08, 0x6a, 0xc2, 0xfb, 0x55, 0x52, + 0x41, 0x57, 0x60, 0x54, 0xe4, 0x69, 0x60, 0xa6, 0xae, 0x7c, 0xea, 0x98, 0xc1, 0x43, 0x39, 0x2e, + 0xc6, 0x3a, 0x8e, 0xfd, 0x7d, 0x05, 0x98, 0xc9, 0xf0, 0x55, 0xe0, 0x47, 0xd5, 0xb6, 0x1b, 0x46, + 0x2a, 0xe3, 0x9f, 0x76, 0x54, 0xf1, 0x72, 0xac, 0x30, 0xe8, 0x7e, 0xe0, 0x87, 0x61, 0xf2, 0x00, + 0x14, 0xb6, 0xc0, 0x02, 0x7a, 0xc4, 0xdc, 0x79, 0x17, 0x60, 0xa0, 0x13, 0x12, 0x19, 0x3c, 0x51, + 0x5d, 0x0d, 0x4c, 0x0f, 0xc6, 0x20, 0x94, 0x55, 0xdf, 0x56, 0x2a, 0x25, 0x8d, 0x55, 0xe7, 0x4a, + 0x25, 0x0e, 0xa3, 0x9d, 0x8b, 0x88, 0xe7, 0x78, 0x91, 0x60, 0xe8, 0xe3, 0x28, 0x60, 0xac, 0x14, + 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xcd, 0xf5, 0x5e, 0xa2, 0x5d, 0x6f, 0xf9, 0x9e, 0x1b, 0xf9, + 0xca, 0x92, 0x84, 0x47, 0xfe, 0x22, 0xed, 0x9d, 0x35, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x08, 0x83, + 0x4c, 0x8a, 0x96, 0xca, 0x7d, 0xb8, 0x54, 0xe6, 0xa1, 0x60, 0x38, 0xb8, 0xef, 0xbc, 0xb2, 0x4f, + 0xd0, 0x5b, 0xd2, 0x6f, 0x26, 0x0f, 0x2d, 0xda, 0x5d, 0xdf, 0x6f, 0x62, 0x06, 0x44, 0x1f, 0x10, + 0xe3, 0x95, 0x30, 0x9d, 0xc0, 0x4e, 0xc3, 0x0f, 0xb5, 0x41, 0x7b, 0x0a, 0x86, 0x77, 0xc9, 0x7e, + 0xe0, 0x7a, 0xdb, 0x49, 0x93, 0x9a, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x99, 0xc6, 0x6a, 0xf8, 0xb8, + 0x13, 0xc2, 0x8e, 0xf4, 0xbc, 0x02, 0x7f, 0xa0, 0x08, 0x93, 0x78, 0xa9, 0xfc, 0xde, 0x44, 0xdc, + 0x4a, 0x4f, 0xc4, 0x71, 0x27, 0x84, 0xed, 0x3d, 0x1b, 0x3f, 0x6f, 0xc1, 0x24, 0xcb, 0x16, 0x21, + 0x62, 0x46, 0xb9, 0xbe, 0x77, 0x02, 0xec, 0xe6, 0x13, 0x30, 0x18, 0xd0, 0x46, 0x93, 0x49, 0x0f, + 0x59, 0x4f, 0x30, 0x87, 0xa1, 
0xc7, 0x60, 0x80, 0x75, 0x81, 0x4e, 0xde, 0x18, 0xcf, 0x17, 0x55, + 0x76, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x08, 0x05, 0x93, 0x76, 0xd3, 0xe5, 0x9d, 0x8e, 0x75, 0x9c, + 0xef, 0x0e, 0xbf, 0xe6, 0xcc, 0xae, 0xbd, 0xb3, 0x40, 0x28, 0xd9, 0x24, 0xbb, 0x3f, 0xe5, 0xfe, + 0xa8, 0x00, 0xe7, 0x33, 0xeb, 0xf5, 0x1d, 0x08, 0xa5, 0x7b, 0xed, 0x87, 0x99, 0x0f, 0xa0, 0x78, + 0x82, 0x06, 0x8b, 0x03, 0xfd, 0x72, 0x98, 0x83, 0x7d, 0xc4, 0x27, 0xc9, 0x1c, 0xb2, 0x77, 0x49, + 0x7c, 0x92, 0xcc, 0xbe, 0xe5, 0x3c, 0x45, 0xff, 0xa2, 0x90, 0xf3, 0x2d, 0xec, 0x51, 0x7a, 0x89, + 0x9e, 0x33, 0x0c, 0x18, 0xca, 0x87, 0x1e, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xb4, 0x08, 0x93, + 0x2d, 0xd7, 0xa3, 0x87, 0xcf, 0xbe, 0xc9, 0xf8, 0xa9, 0xf0, 0x51, 0x6b, 0x26, 0x18, 0x27, 0xf1, + 0x91, 0xab, 0xc5, 0x2e, 0x29, 0xe4, 0xa7, 0x11, 0xcf, 0xed, 0xed, 0x82, 0xa9, 0xff, 0x55, 0xa3, + 0x98, 0x11, 0xc7, 0x64, 0x4d, 0x93, 0x45, 0x14, 0xfb, 0x97, 0x45, 0x8c, 0x65, 0xcb, 0x21, 0xe6, + 0x5e, 0x81, 0xf1, 0x07, 0x16, 0x3e, 0xdb, 0x5f, 0x2d, 0xc2, 0xa3, 0x5d, 0xb6, 0x3d, 0x3f, 0xeb, + 0x8d, 0x39, 0xd0, 0xce, 0xfa, 0xd4, 0x3c, 0x54, 0xe1, 0xd4, 0x56, 0xa7, 0xd9, 0xdc, 0x67, 0x3e, + 0x01, 0xa4, 0x21, 0x31, 0x04, 0x4f, 0x29, 0x1f, 0xe0, 0xa7, 0x56, 0x33, 0x70, 0x70, 0x66, 0x4d, + 0xca, 0xd0, 0xd3, 0x9b, 0x64, 0x5f, 0x91, 0x4a, 0x30, 0xf4, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, + 0x85, 0x69, 0x67, 0xcf, 0x71, 0x79, 0x00, 0x58, 0x49, 0x80, 0x73, 0xf4, 0x4a, 0x66, 0xb8, 0x98, + 0x44, 0xc0, 0xe9, 0x3a, 0xe8, 0x35, 0x40, 0xfe, 0x26, 0xb3, 0xf6, 0x6d, 0x5c, 0x25, 0x9e, 0x50, + 0xd3, 0xb1, 0xb9, 0x2b, 0xc6, 0x47, 0xc2, 0xcd, 0x14, 0x06, 0xce, 0xa8, 0x95, 0x88, 0x05, 0x32, + 0x94, 0x1f, 0x0b, 0xa4, 0xfb, 0xb9, 0xd8, 0x33, 0x15, 0xc5, 0x7f, 0xb1, 0xe8, 0xf5, 0xc5, 0x99, + 0x7c, 0x33, 0xa4, 0xdd, 0x2b, 0xcc, 0xcc, 0x8e, 0xcb, 0x13, 0xb5, 0x08, 0x16, 0xa7, 0x35, 0x33, + 0xbb, 0x18, 0x88, 0x4d, 0x5c, 0xbe, 0x20, 0xc2, 0xd8, 0x71, 0xd2, 0x60, 0xf1, 0x45, 0xdc, 0x1d, + 0x85, 0x81, 0x3e, 0x09, 0xc3, 0x0d, 0x77, 0xcf, 0x0d, 0x85, 0x34, 0xe5, 0xc8, 0xaa, 0x8b, 0xf8, + 0x1c, 0x2c, 0x73, 0x32, 0x58, 0xd2, 0xb3, 0x7f, 0xa0, 0x00, 0xe3, 0xb2, 0xc5, 0xd7, 0x3b, 0x7e, + 0xe4, 0x9c, 0xc0, 0xb5, 0x7c, 0xd5, 0xb8, 0x96, 0x3f, 0xd0, 0x2d, 0xf8, 0x10, 0xeb, 0x52, 0xee, + 0x75, 0x7c, 0x33, 0x71, 0x1d, 0x3f, 0xd9, 0x9b, 0x54, 0xf7, 0x6b, 0xf8, 0x9f, 0x5a, 0x30, 0x6d, + 0xe0, 0x9f, 0xc0, 0x6d, 0xb0, 0x6a, 0xde, 0x06, 0x8f, 0xf7, 0xfc, 0x86, 0x9c, 0x5b, 0xe0, 0xbb, + 0x8b, 0x89, 0xbe, 0xb3, 0xd3, 0xff, 0x2d, 0x18, 0xd8, 0x71, 0x82, 0x46, 0xb7, 0x60, 0xeb, 0xa9, + 0x4a, 0x0b, 0xd7, 0x9c, 0x40, 0xe8, 0x29, 0x9f, 0x51, 0x59, 0xbc, 0x9d, 0xa0, 0xb7, 0x8e, 0x92, + 0x35, 0x85, 0x5e, 0x82, 0xa1, 0xb0, 0xee, 0xb7, 0x95, 0x15, 0xff, 0x05, 0x9e, 0xe1, 0x9b, 0x96, + 0x1c, 0x1e, 0xcc, 0x23, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0xfa, 0x14, 0x8c, 0xb3, 0x5f, 0xca, + 0x68, 0xa8, 0x98, 0x9f, 0x98, 0xa9, 0xa6, 0x23, 0x72, 0x8b, 0x3a, 0xa3, 0x08, 0x9b, 0xa4, 0xe6, + 0xb6, 0xa1, 0xa4, 0x3e, 0xeb, 0xa1, 0xea, 0x06, 0xff, 0x63, 0x11, 0x66, 0x32, 0xd6, 0x1c, 0x0a, + 0x8d, 0x99, 0xb8, 0xd2, 0xe7, 0x52, 0x7d, 0x87, 0x73, 0x11, 0xb2, 0xd7, 0x50, 0x43, 0xac, 0xad, + 0xbe, 0x1b, 0xbd, 0x15, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x77, 0xa3, 0xb4, 0xb1, 0x13, 0x1b, 0x6a, + 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0x2d, 0xc2, 0xa9, 0xac, 0x78, 0x68, 0xe8, 0x73, + 0x89, 0x54, 0x7f, 0x2f, 0xf4, 0x1b, 0x49, 0x8d, 0xe7, 0xff, 0xe3, 0x32, 0xe0, 0xa5, 0x05, 0x33, + 0xf9, 0x5f, 0xcf, 0x61, 0x16, 0x6d, 0xb2, 0xa0, 0x00, 0x01, 0x4f, 0xd1, 0x28, 0x8f, 0x8f, 0x0f, + 0xf7, 0xdd, 0x01, 0x91, 0xdb, 0x31, 0x4c, 0x18, 0x24, 
0xc8, 0xe2, 0xde, 0x06, 0x09, 0xb2, 0xe5, + 0x39, 0x17, 0x46, 0xb5, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4b, 0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, + 0xd6, 0x7f, 0xd4, 0x82, 0x84, 0x8d, 0xba, 0x12, 0x8b, 0x59, 0xb9, 0x62, 0xb1, 0x0b, 0x30, 0x10, + 0xf8, 0x4d, 0x92, 0xcc, 0x89, 0x87, 0xfd, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xa2, 0x58, 0xd8, 0x31, + 0xa6, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x09, 0x18, 0x6c, 0x92, 0x3d, 0xd2, 0x4c, 0xa6, 0x2e, 0xb9, + 0x41, 0x0b, 0x31, 0x87, 0xd9, 0x3f, 0x3f, 0x00, 0xe7, 0xba, 0x86, 0xd5, 0xa0, 0xcf, 0xa1, 0x6d, + 0x27, 0x22, 0x77, 0x9d, 0xfd, 0x64, 0x8e, 0x81, 0xab, 0xbc, 0x18, 0x4b, 0x38, 0xf3, 0x22, 0xe2, + 0xa1, 0x82, 0x13, 0x42, 0x44, 0x11, 0x21, 0x58, 0x40, 0x4d, 0xa1, 0x54, 0xf1, 0x38, 0x84, 0x52, + 0xcf, 0x01, 0x84, 0x61, 0x93, 0x5b, 0xf2, 0x34, 0x84, 0x7b, 0x52, 0x1c, 0x52, 0xba, 0x76, 0x43, + 0x40, 0xb0, 0x86, 0x85, 0xca, 0x30, 0xd5, 0x0e, 0xfc, 0x88, 0xcb, 0x64, 0xcb, 0xdc, 0xd8, 0x6d, + 0xd0, 0x8c, 0x68, 0x50, 0x4d, 0xc0, 0x71, 0xaa, 0x06, 0x7a, 0x11, 0x46, 0x45, 0x94, 0x83, 0xaa, + 0xef, 0x37, 0x85, 0x18, 0x48, 0xd9, 0x7f, 0xd5, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0xd0, + 0x3b, 0x9c, 0x59, 0x8d, 0x0b, 0x7b, 0x35, 0xbc, 0x44, 0x6c, 0xc4, 0x91, 0xbe, 0x62, 0x23, 0xc6, + 0x82, 0xb1, 0x52, 0xdf, 0xba, 0x2d, 0xe8, 0x29, 0x4a, 0xfa, 0x99, 0x01, 0x98, 0x11, 0x0b, 0xe7, + 0x61, 0x2f, 0x97, 0x5b, 0xe9, 0xe5, 0x72, 0x1c, 0xa2, 0xb3, 0xf7, 0xd6, 0xcc, 0x49, 0xaf, 0x99, + 0x1f, 0xb4, 0xc0, 0x64, 0xaf, 0xd0, 0x5f, 0xca, 0x4d, 0xd2, 0xf2, 0x62, 0x2e, 0xbb, 0xd6, 0x90, + 0x17, 0xc8, 0x3b, 0x4c, 0xd7, 0x62, 0xff, 0x67, 0x0b, 0x1e, 0xef, 0x49, 0x11, 0xad, 0x40, 0x89, + 0xf1, 0x80, 0xda, 0xeb, 0xec, 0x49, 0x65, 0x0c, 0x2b, 0x01, 0x39, 0x2c, 0x69, 0x5c, 0x13, 0xad, + 0xa4, 0xb2, 0xe1, 0x3c, 0x95, 0x91, 0x0d, 0xe7, 0xb4, 0x31, 0x3c, 0x0f, 0x98, 0x0e, 0xe7, 0xfb, + 0xe9, 0x8d, 0x63, 0x38, 0xa2, 0xa0, 0x0f, 0x1b, 0x62, 0x3f, 0x3b, 0x21, 0xf6, 0x43, 0x26, 0xb6, + 0x76, 0x87, 0x7c, 0x1c, 0xa6, 0x58, 0xf8, 0x23, 0x66, 0x9a, 0x2d, 0x5c, 0x64, 0x0a, 0xb1, 0xf9, + 0xe5, 0x8d, 0x04, 0x0c, 0xa7, 0xb0, 0xed, 0x3f, 0x2c, 0xc2, 0x10, 0xdf, 0x7e, 0x27, 0xf0, 0x26, + 0x7c, 0x1a, 0x4a, 0x6e, 0xab, 0xd5, 0xe1, 0x09, 0x4e, 0x06, 0xb9, 0x5f, 0x2c, 0x9d, 0xa7, 0x8a, + 0x2c, 0xc4, 0x31, 0x1c, 0xad, 0x0a, 0x89, 0x73, 0x97, 0x08, 0x8b, 0xbc, 0xe3, 0x0b, 0x65, 0x27, + 0x72, 0x38, 0x83, 0xa3, 0xee, 0xd9, 0x58, 0x36, 0x8d, 0x3e, 0x03, 0x10, 0x46, 0x81, 0xeb, 0x6d, + 0xd3, 0x32, 0x11, 0x50, 0xf4, 0x83, 0x5d, 0xa8, 0xd5, 0x14, 0x32, 0xa7, 0x19, 0x9f, 0x39, 0x0a, + 0x80, 0x35, 0x8a, 0x68, 0xc1, 0xb8, 0xe9, 0xe7, 0x12, 0x73, 0x07, 0x9c, 0x6a, 0x3c, 0x67, 0x73, + 0x1f, 0x81, 0x92, 0x22, 0xde, 0x4b, 0xfe, 0x34, 0xa6, 0xb3, 0x45, 0x1f, 0x83, 0xc9, 0x44, 0xdf, + 0x8e, 0x24, 0xbe, 0xfa, 0x05, 0x0b, 0x26, 0x79, 0x67, 0x56, 0xbc, 0x3d, 0x71, 0x1b, 0xbc, 0x0d, + 0xa7, 0x9a, 0x19, 0xa7, 0xb2, 0x98, 0xfe, 0xfe, 0x4f, 0x71, 0x25, 0xae, 0xca, 0x82, 0xe2, 0xcc, + 0x36, 0xd0, 0x25, 0xba, 0xe3, 0xe8, 0xa9, 0xeb, 0x34, 0x85, 0x9b, 0xec, 0x18, 0xdf, 0x6d, 0xbc, + 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0x5f, 0x27, 0xfb, 0xea, 0x6c, 0xfa, 0x7a, + 0xf6, 0x5d, 0xa4, 0xd6, 0x2a, 0xe4, 0xa4, 0xd6, 0xd2, 0x3f, 0xad, 0xd8, 0xf5, 0xd3, 0xbe, 0x6c, + 0x81, 0x58, 0x21, 0x27, 0x20, 0x84, 0xf8, 0x56, 0x53, 0x08, 0x31, 0x97, 0xbf, 0x09, 0x72, 0xa4, + 0x0f, 0x7f, 0x6e, 0xc1, 0x14, 0x47, 0x88, 0xb5, 0xe5, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0x01, 0xef, + 0x75, 0xb2, 0xbf, 0xe1, 0x57, 0x9d, 0x68, 0x27, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, + 0x0d, 0xb9, 0x81, 0x8e, 0x90, 0xd5, 0xfb, 0xc8, 0x99, 0x27, 0xec, 0xaf, 0x59, 
0x80, 0x78, 0x33, + 0x06, 0xe3, 0x46, 0xd9, 0x21, 0x56, 0xaa, 0x5d, 0x74, 0xf1, 0xd1, 0xa4, 0x20, 0x58, 0xc3, 0x3a, + 0x96, 0xe1, 0x49, 0x98, 0x3c, 0x14, 0x7b, 0x9b, 0x3c, 0x1c, 0x61, 0x44, 0xff, 0xed, 0x10, 0x24, + 0x9d, 0x71, 0xd0, 0x6d, 0x18, 0xab, 0x3b, 0x6d, 0x67, 0xd3, 0x6d, 0xba, 0x91, 0x4b, 0xc2, 0x6e, + 0xb6, 0x52, 0xcb, 0x1a, 0x9e, 0x50, 0x52, 0x6b, 0x25, 0xd8, 0xa0, 0x83, 0x16, 0x00, 0xda, 0x81, + 0xbb, 0xe7, 0x36, 0xc9, 0x36, 0x93, 0x95, 0x30, 0xc7, 0x7c, 0x6e, 0x00, 0x24, 0x4b, 0xb1, 0x86, + 0x91, 0xe1, 0xf9, 0x5c, 0x7c, 0xc8, 0x9e, 0xcf, 0x70, 0x62, 0x9e, 0xcf, 0x03, 0x47, 0xf2, 0x7c, + 0x1e, 0x39, 0xb2, 0xe7, 0xf3, 0x60, 0x5f, 0x9e, 0xcf, 0x18, 0xce, 0x48, 0xde, 0x93, 0xfe, 0x5f, + 0x75, 0x9b, 0x44, 0x3c, 0x38, 0x78, 0x34, 0x81, 0xb9, 0xfb, 0x07, 0xf3, 0x67, 0x70, 0x26, 0x06, + 0xce, 0xa9, 0x89, 0x3e, 0x01, 0xb3, 0x4e, 0xb3, 0xe9, 0xdf, 0x55, 0x93, 0xba, 0x12, 0xd6, 0x9d, + 0x26, 0x57, 0x42, 0x0c, 0x33, 0xaa, 0x8f, 0xdd, 0x3f, 0x98, 0x9f, 0x5d, 0xcc, 0xc1, 0xc1, 0xb9, + 0xb5, 0xd1, 0x47, 0xa1, 0xd4, 0x0e, 0xfc, 0xfa, 0x9a, 0xe6, 0x31, 0x78, 0x9e, 0x0e, 0x60, 0x55, + 0x16, 0x1e, 0x1e, 0xcc, 0x8f, 0xab, 0x3f, 0xec, 0xc2, 0x8f, 0x2b, 0x64, 0xb8, 0x32, 0x8f, 0x1e, + 0xab, 0x2b, 0xf3, 0x2e, 0xcc, 0xd4, 0x48, 0xe0, 0xb2, 0x1c, 0xe0, 0x8d, 0xf8, 0x7c, 0xda, 0x80, + 0x52, 0x90, 0x38, 0x91, 0xfb, 0x8a, 0x7a, 0xa8, 0xa5, 0x00, 0x90, 0x27, 0x70, 0x4c, 0xc8, 0xfe, + 0x3f, 0x16, 0x0c, 0x0b, 0xe7, 0x9b, 0x13, 0xe0, 0x1a, 0x17, 0x0d, 0x4d, 0xc2, 0x7c, 0xf6, 0x80, + 0xb1, 0xce, 0xe4, 0xea, 0x10, 0x2a, 0x09, 0x1d, 0xc2, 0xe3, 0xdd, 0x88, 0x74, 0xd7, 0x1e, 0xfc, + 0xcd, 0x22, 0xe5, 0xde, 0x0d, 0x37, 0xd0, 0x87, 0x3f, 0x04, 0xeb, 0x30, 0x1c, 0x0a, 0x37, 0xc4, + 0x42, 0xbe, 0xdd, 0x7c, 0x72, 0x12, 0x63, 0x3b, 0x36, 0xe1, 0x78, 0x28, 0x89, 0x64, 0xfa, 0x37, + 0x16, 0x1f, 0xa2, 0x7f, 0x63, 0x2f, 0x47, 0xd9, 0x81, 0xe3, 0x70, 0x94, 0xb5, 0xbf, 0xc2, 0x6e, + 0x4e, 0xbd, 0xfc, 0x04, 0x98, 0xaa, 0xab, 0xe6, 0x1d, 0x6b, 0x77, 0x59, 0x59, 0xa2, 0x53, 0x39, + 0xcc, 0xd5, 0xcf, 0x59, 0x70, 0x2e, 0xe3, 0xab, 0x34, 0x4e, 0xeb, 0x19, 0x18, 0x71, 0x3a, 0x0d, + 0x57, 0xed, 0x65, 0x4d, 0x9f, 0xb8, 0x28, 0xca, 0xb1, 0xc2, 0x40, 0xcb, 0x30, 0x4d, 0xee, 0xb5, + 0x5d, 0xae, 0x4a, 0xd5, 0x8d, 0x4d, 0x8b, 0xdc, 0x63, 0x6b, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0x15, + 0x9c, 0xa4, 0x98, 0x1b, 0x9c, 0xe4, 0x1f, 0x58, 0x30, 0xaa, 0x1c, 0xf1, 0x1e, 0xfa, 0x68, 0x7f, + 0xdc, 0x1c, 0xed, 0x47, 0xbb, 0x8c, 0x76, 0xce, 0x30, 0xff, 0x56, 0x41, 0xf5, 0xb7, 0xea, 0x07, + 0x51, 0x1f, 0x1c, 0xdc, 0x83, 0x9b, 0xc7, 0x5f, 0x81, 0x51, 0xa7, 0xdd, 0x96, 0x00, 0x69, 0x83, + 0xc6, 0x62, 0xd8, 0xc6, 0xc5, 0x58, 0xc7, 0x51, 0xd6, 0xfa, 0xc5, 0x5c, 0x6b, 0xfd, 0x06, 0x40, + 0xe4, 0x04, 0xdb, 0x24, 0xa2, 0x65, 0x22, 0x90, 0x58, 0xfe, 0x79, 0xd3, 0x89, 0xdc, 0xe6, 0x82, + 0xeb, 0x45, 0x61, 0x14, 0x2c, 0x54, 0xbc, 0xe8, 0x66, 0xc0, 0x9f, 0x90, 0x5a, 0xa4, 0x1e, 0x45, + 0x0b, 0x6b, 0x74, 0xa5, 0xd3, 0x39, 0x6b, 0x63, 0xd0, 0x34, 0x66, 0x58, 0x17, 0xe5, 0x58, 0x61, + 0xd8, 0x1f, 0x61, 0xb7, 0x0f, 0x1b, 0xd3, 0xa3, 0x85, 0xb6, 0xf9, 0xf2, 0xa8, 0x9a, 0x0d, 0xa6, + 0xc9, 0x2c, 0xeb, 0x01, 0x74, 0xba, 0x1f, 0xf6, 0xb4, 0x61, 0xdd, 0x77, 0x2c, 0x8e, 0xb2, 0x83, + 0xbe, 0x2d, 0x65, 0xa0, 0xf2, 0x6c, 0x8f, 0x5b, 0xe3, 0x08, 0x26, 0x29, 0x2c, 0xa1, 0x05, 0x0b, + 0xf7, 0x5f, 0xa9, 0x8a, 0x7d, 0xa1, 0x25, 0xb4, 0x10, 0x00, 0x1c, 0xe3, 0x50, 0x66, 0x4a, 0xfd, + 0x09, 0x67, 0x51, 0x1c, 0xd8, 0x51, 0x61, 0x87, 0x58, 0xc3, 0x40, 0x97, 0x85, 0x40, 0x81, 0xeb, + 0x05, 0x1e, 0x4d, 0x08, 0x14, 0xe4, 0x70, 0x69, 0x52, 0xa0, 0x2b, 0x30, 0xaa, 0x72, 0xda, 0x56, + 0x79, 
0xaa, 0x54, 0xb1, 0xcc, 0x56, 0xe2, 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0c, 0xb9, 0x9c, + 0x4d, 0x45, 0xdb, 0xe5, 0xf2, 0xca, 0x0f, 0x4a, 0x2b, 0xa0, 0x9a, 0x09, 0x3e, 0x64, 0x45, 0xfc, + 0x74, 0x92, 0x8e, 0xe1, 0x49, 0x12, 0xe8, 0x55, 0x98, 0x68, 0xfa, 0x4e, 0x63, 0xc9, 0x69, 0x3a, + 0x5e, 0x9d, 0x8d, 0xcf, 0x88, 0x99, 0x1a, 0xf1, 0x86, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0xbc, 0xe9, + 0x25, 0x22, 0x42, 0xb4, 0xe3, 0x6d, 0x93, 0x50, 0x64, 0x28, 0x65, 0xcc, 0xdb, 0x8d, 0x1c, 0x1c, + 0x9c, 0x5b, 0x1b, 0xbd, 0x04, 0x63, 0xf2, 0xf3, 0xb5, 0x38, 0x0a, 0xb1, 0xe3, 0x83, 0x06, 0xc3, + 0x06, 0x26, 0xba, 0x0b, 0xa7, 0xe5, 0xff, 0x8d, 0xc0, 0xd9, 0xda, 0x72, 0xeb, 0xc2, 0xb9, 0x98, + 0x7b, 0x48, 0x2e, 0x4a, 0x37, 0xbe, 0x95, 0x2c, 0xa4, 0xc3, 0x83, 0xf9, 0x0b, 0x62, 0xd4, 0x32, + 0xe1, 0x6c, 0x12, 0xb3, 0xe9, 0xa3, 0x35, 0x98, 0xd9, 0x21, 0x4e, 0x33, 0xda, 0x59, 0xde, 0x21, + 0xf5, 0x5d, 0xb9, 0xe9, 0x58, 0x74, 0x06, 0xcd, 0x5d, 0xe0, 0x5a, 0x1a, 0x05, 0x67, 0xd5, 0x43, + 0x6f, 0xc0, 0x6c, 0xbb, 0xb3, 0xd9, 0x74, 0xc3, 0x9d, 0x75, 0x3f, 0x62, 0xa6, 0x40, 0x2a, 0x45, + 0xae, 0x08, 0xe3, 0xa0, 0xe2, 0x5f, 0x54, 0x73, 0xf0, 0x70, 0x2e, 0x05, 0xf4, 0x36, 0x9c, 0x4e, + 0x2c, 0x06, 0xe1, 0xc8, 0x3e, 0x91, 0x1f, 0x6f, 0xbf, 0x96, 0x55, 0x41, 0xc4, 0x84, 0xc8, 0x02, + 0xe1, 0xec, 0x26, 0xe8, 0xe3, 0x43, 0x0b, 0x70, 0x1a, 0xce, 0x4e, 0xc5, 0x36, 0xcb, 0x5a, 0x14, + 0xd4, 0x10, 0x1b, 0x58, 0xe8, 0x65, 0x00, 0xb7, 0xbd, 0xea, 0xb4, 0xdc, 0x26, 0x7d, 0x64, 0xce, + 0xb0, 0x3a, 0xf4, 0xc1, 0x01, 0x95, 0xaa, 0x2c, 0xa5, 0xa7, 0xba, 0xf8, 0xb7, 0x8f, 0x35, 0x6c, + 0x54, 0x85, 0x09, 0xf1, 0x6f, 0x5f, 0x2c, 0x86, 0x69, 0xe5, 0x69, 0x3e, 0x21, 0x6b, 0xa8, 0x15, + 0x80, 0xcc, 0x12, 0x36, 0xe7, 0x89, 0xfa, 0x68, 0x1b, 0xce, 0x89, 0x1c, 0xcc, 0x44, 0x5f, 0xdd, + 0x72, 0xf6, 0x42, 0x16, 0x1e, 0x7f, 0x84, 0x07, 0x90, 0x59, 0xec, 0x86, 0x88, 0xbb, 0xd3, 0x79, + 0x67, 0x16, 0x70, 0xbf, 0x6d, 0xd1, 0xda, 0x1a, 0x97, 0x8c, 0x3e, 0x0b, 0x63, 0xfa, 0x9e, 0x13, + 0x37, 0xfe, 0xc5, 0x6c, 0x26, 0x52, 0xdb, 0x9b, 0x9c, 0xc7, 0x56, 0xfb, 0x4f, 0x87, 0x61, 0x83, + 0x22, 0xaa, 0x67, 0xb8, 0x51, 0x5f, 0xee, 0x8f, 0xa3, 0xe8, 0xdf, 0x00, 0x8c, 0x40, 0xf6, 0x92, + 0x43, 0x37, 0x60, 0xa4, 0xde, 0x74, 0x89, 0x17, 0x55, 0xaa, 0xdd, 0x02, 0x9f, 0x2d, 0x0b, 0x1c, + 0xb1, 0x86, 0x45, 0xcc, 0x78, 0x5e, 0x86, 0x15, 0x05, 0xfb, 0x57, 0x0b, 0x30, 0xdf, 0x23, 0x01, + 0x41, 0x42, 0x1d, 0x64, 0xf5, 0xa5, 0x0e, 0x5a, 0x94, 0x39, 0x98, 0xd7, 0x13, 0x92, 0xa6, 0x44, + 0x7e, 0xe5, 0x58, 0xde, 0x94, 0xc4, 0xef, 0xdb, 0x3c, 0x5f, 0xd7, 0x28, 0x0d, 0xf4, 0x74, 0x30, + 0x31, 0x34, 0xc9, 0x83, 0xfd, 0x3f, 0x3f, 0x73, 0xb5, 0x82, 0xf6, 0x57, 0x0a, 0x70, 0x5a, 0x0d, + 0xe1, 0x37, 0xef, 0xc0, 0xdd, 0x4a, 0x0f, 0xdc, 0x31, 0xe8, 0x54, 0xed, 0x9b, 0x30, 0xc4, 0x23, + 0xb9, 0xf5, 0xc1, 0xf6, 0x3e, 0x61, 0x06, 0x3d, 0x55, 0x9c, 0x96, 0x11, 0xf8, 0xf4, 0x7b, 0x2d, + 0x98, 0xdc, 0x58, 0xae, 0xd6, 0xfc, 0xfa, 0x2e, 0x89, 0x16, 0xf9, 0x33, 0x05, 0x6b, 0x0e, 0xa7, + 0x0f, 0xc2, 0x9a, 0x66, 0x31, 0xbd, 0x17, 0x60, 0x60, 0xc7, 0x0f, 0xa3, 0xa4, 0xc1, 0xc5, 0x35, + 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xbf, 0x6b, 0xc1, 0xe0, 0x86, 0xe3, 0x7a, 0x91, 0x14, 0xce, 0x5b, + 0x39, 0xc2, 0xf9, 0x7e, 0xbe, 0x0b, 0xbd, 0x08, 0x43, 0x64, 0x6b, 0x8b, 0xd4, 0x23, 0x31, 0xab, + 0xd2, 0x5b, 0x7f, 0x68, 0x85, 0x95, 0x52, 0x3e, 0x8c, 0x35, 0xc6, 0xff, 0x62, 0x81, 0x8c, 0xee, + 0x40, 0x29, 0x72, 0x5b, 0x64, 0xb1, 0xd1, 0x10, 0x2a, 0xeb, 0x07, 0x88, 0x38, 0xb0, 0x21, 0x09, + 0xe0, 0x98, 0x96, 0xfd, 0xc5, 0x02, 0x40, 0x1c, 0x02, 0xa7, 0xd7, 0x27, 0x2e, 0xa5, 0x94, 0x99, + 0x17, 0x33, 0x94, 0x99, 0x28, 
0x26, 0x98, 0xa1, 0xc9, 0x54, 0xc3, 0x54, 0xec, 0x6b, 0x98, 0x06, + 0x8e, 0x32, 0x4c, 0xcb, 0x30, 0x1d, 0x87, 0xf0, 0x31, 0x23, 0x98, 0xb1, 0xa7, 0xe9, 0x46, 0x12, + 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x0b, 0x2a, 0x92, 0x89, 0xb8, 0xd1, 0x98, 0x45, 0xb4, 0xae, 0x1c, + 0xee, 0x31, 0x4e, 0xb1, 0xb6, 0xb6, 0x90, 0xab, 0xad, 0xfd, 0x09, 0x0b, 0x4e, 0x25, 0xdb, 0x61, + 0x2e, 0xaa, 0x5f, 0xb0, 0xe0, 0x34, 0xd3, 0x59, 0xb3, 0x56, 0xd3, 0x1a, 0xf2, 0x17, 0xba, 0x46, + 0x67, 0xc9, 0xe9, 0x71, 0x1c, 0x16, 0x62, 0x2d, 0x8b, 0x34, 0xce, 0x6e, 0xd1, 0xfe, 0x4f, 0x05, + 0x98, 0xcd, 0x0b, 0xeb, 0xc2, 0x1c, 0x26, 0x9c, 0x7b, 0xb5, 0x5d, 0x72, 0x57, 0x98, 0xa5, 0xc7, + 0x0e, 0x13, 0xbc, 0x18, 0x4b, 0x78, 0x32, 0xa6, 0x7c, 0xa1, 0xbf, 0x98, 0xf2, 0x68, 0x07, 0xa6, + 0xef, 0xee, 0x10, 0xef, 0x96, 0x17, 0x3a, 0x91, 0x1b, 0x6e, 0xb9, 0x4c, 0xbf, 0xcb, 0xd7, 0xcd, + 0xcb, 0xd2, 0x78, 0xfc, 0x4e, 0x12, 0xe1, 0xf0, 0x60, 0xfe, 0x9c, 0x51, 0x10, 0x77, 0x99, 0x1f, + 0x24, 0x38, 0x4d, 0x34, 0x1d, 0x92, 0x7f, 0xe0, 0x21, 0x86, 0xe4, 0xb7, 0xbf, 0x60, 0xc1, 0xd9, + 0xdc, 0x3c, 0xa0, 0xe8, 0x12, 0x8c, 0x38, 0x6d, 0x97, 0x8b, 0xc8, 0xc5, 0x31, 0xca, 0x44, 0x31, + 0xd5, 0x0a, 0x17, 0x90, 0x2b, 0xa8, 0xca, 0x4f, 0x5e, 0xc8, 0xcd, 0x4f, 0xde, 0x33, 0xdd, 0xb8, + 0xfd, 0x3d, 0x16, 0x08, 0x67, 0xcf, 0x3e, 0xce, 0xee, 0x4f, 0xc1, 0xd8, 0x5e, 0x3a, 0x6d, 0xcf, + 0x85, 0x7c, 0xef, 0x57, 0x91, 0xac, 0x47, 0x31, 0x64, 0x46, 0x8a, 0x1e, 0x83, 0x96, 0xdd, 0x00, + 0x01, 0x2d, 0x13, 0x26, 0x00, 0xee, 0xdd, 0x9b, 0xe7, 0x00, 0x1a, 0x0c, 0x57, 0x4b, 0xf2, 0xae, + 0x6e, 0xe6, 0xb2, 0x82, 0x60, 0x0d, 0xcb, 0xfe, 0xf7, 0x05, 0x18, 0x95, 0x69, 0x62, 0x3a, 0x5e, + 0x3f, 0x62, 0x9a, 0x23, 0xe5, 0x8d, 0xa4, 0xaf, 0x78, 0x26, 0x47, 0xac, 0xc6, 0xd2, 0x2d, 0xf5, + 0x8a, 0x5f, 0x93, 0x00, 0x1c, 0xe3, 0xd0, 0x5d, 0x14, 0x76, 0x36, 0x19, 0x7a, 0xc2, 0x35, 0xb1, + 0xc6, 0x8b, 0xb1, 0x84, 0xa3, 0x4f, 0xc0, 0x14, 0xaf, 0x17, 0xf8, 0x6d, 0x67, 0x9b, 0xeb, 0x1e, + 0x06, 0x55, 0x4c, 0x81, 0xa9, 0xb5, 0x04, 0xec, 0xf0, 0x60, 0xfe, 0x54, 0xb2, 0x8c, 0x29, 0xd5, + 0x52, 0x54, 0x98, 0x89, 0x11, 0x6f, 0x84, 0xee, 0xfe, 0x94, 0x65, 0x52, 0x0c, 0xc2, 0x3a, 0x9e, + 0xfd, 0x59, 0x40, 0xe9, 0x84, 0x39, 0xe8, 0x35, 0x6e, 0x57, 0xea, 0x06, 0xa4, 0xd1, 0x4d, 0xc9, + 0xa6, 0x7b, 0xce, 0x4b, 0xaf, 0x22, 0x5e, 0x0b, 0xab, 0xfa, 0xf6, 0x5f, 0x2d, 0xc2, 0x54, 0xd2, + 0x8f, 0x1a, 0x5d, 0x83, 0x21, 0xce, 0x7a, 0x08, 0xf2, 0x5d, 0x6c, 0x38, 0x34, 0xef, 0x6b, 0x76, + 0x08, 0x0b, 0xee, 0x45, 0xd4, 0x47, 0x6f, 0xc0, 0x68, 0xc3, 0xbf, 0xeb, 0xdd, 0x75, 0x82, 0xc6, + 0x62, 0xb5, 0x22, 0x96, 0x73, 0xe6, 0xa3, 0xb2, 0x1c, 0xa3, 0xe9, 0x1e, 0xdd, 0x4c, 0x5f, 0x19, + 0x83, 0xb0, 0x4e, 0x0e, 0x6d, 0xb0, 0xf8, 0xde, 0x5b, 0xee, 0xf6, 0x9a, 0xd3, 0xee, 0xe6, 0x64, + 0xb0, 0x2c, 0x91, 0x34, 0xca, 0xe3, 0x22, 0x08, 0x38, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x1c, 0xcc, + 0x84, 0x39, 0xa2, 0xee, 0xbc, 0xfc, 0x69, 0xdd, 0xa4, 0xbf, 0x4b, 0x8f, 0xd0, 0xe7, 0x7e, 0x96, + 0x50, 0x3c, 0xab, 0x19, 0xfb, 0x47, 0x4e, 0x81, 0xb1, 0x89, 0x8d, 0x74, 0x9a, 0xd6, 0x31, 0xa5, + 0xd3, 0xc4, 0x30, 0x42, 0x5a, 0xed, 0x68, 0xbf, 0xec, 0x06, 0xdd, 0x92, 0x4c, 0xaf, 0x08, 0x9c, + 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x76, 0xce, 0xd3, 0xe2, 0xd7, 0x31, 0xe7, 0xe9, 0xc0, 0x09, + 0xe6, 0x3c, 0x5d, 0x87, 0xe1, 0x6d, 0x37, 0xc2, 0xa4, 0xed, 0x0b, 0xa6, 0x3f, 0x73, 0x1d, 0x5e, + 0xe5, 0x28, 0xe9, 0xec, 0x7a, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x9a, 0xda, 0x81, 0x43, 0xf9, 0x0f, + 0xf3, 0xb4, 0xb1, 0x41, 0xe6, 0x1e, 0x14, 0x99, 0x4d, 0x87, 0x1f, 0x34, 0xb3, 0xe9, 0xaa, 0xcc, + 0x47, 0x3a, 0x92, 0xef, 0x11, 0xc4, 0xd2, 0x8d, 0xf6, 
0xc8, 0x42, 0x7a, 0x5b, 0xcf, 0xe1, 0x5a, + 0xca, 0x3f, 0x09, 0x54, 0x7a, 0xd6, 0x3e, 0x33, 0xb7, 0x7e, 0x8f, 0x05, 0xa7, 0xdb, 0x59, 0xe9, + 0x8c, 0x85, 0x5e, 0xfe, 0xc5, 0xbe, 0x33, 0x26, 0x1b, 0x0d, 0x32, 0x79, 0x56, 0x26, 0x1a, 0xce, + 0x6e, 0x8e, 0x0e, 0x74, 0xb0, 0xd9, 0x10, 0xfa, 0xe1, 0x27, 0x72, 0x52, 0xc0, 0x76, 0x49, 0xfc, + 0xba, 0x91, 0x91, 0x6e, 0xf4, 0xfd, 0x79, 0xe9, 0x46, 0xfb, 0x4e, 0x32, 0xfa, 0x9a, 0x4a, 0xfe, + 0x3a, 0x9e, 0xbf, 0x94, 0x78, 0x6a, 0xd7, 0x9e, 0x29, 0x5f, 0x5f, 0x53, 0x29, 0x5f, 0xbb, 0x04, + 0x6f, 0xe5, 0x09, 0x5d, 0x7b, 0x26, 0x7a, 0xd5, 0x92, 0xb5, 0x4e, 0x1e, 0x4f, 0xb2, 0x56, 0xe3, + 0xaa, 0xe1, 0xf9, 0x42, 0x9f, 0xee, 0x71, 0xd5, 0x18, 0x74, 0xbb, 0x5f, 0x36, 0x3c, 0x31, 0xed, + 0xf4, 0x03, 0x25, 0xa6, 0xbd, 0xad, 0x27, 0x7a, 0x45, 0x3d, 0x32, 0x99, 0x52, 0xa4, 0x3e, 0xd3, + 0xbb, 0xde, 0xd6, 0x2f, 0xc0, 0x99, 0x7c, 0xba, 0xea, 0x9e, 0x4b, 0xd3, 0xcd, 0xbc, 0x02, 0x53, + 0x69, 0x63, 0x4f, 0x9d, 0x4c, 0xda, 0xd8, 0xd3, 0xc7, 0x9e, 0x36, 0xf6, 0xcc, 0x09, 0xa4, 0x8d, + 0x7d, 0xe4, 0x04, 0xd3, 0xc6, 0xde, 0x66, 0xc6, 0x2c, 0x3c, 0x64, 0x8e, 0x08, 0x36, 0x9b, 0x1d, + 0xd8, 0x34, 0x2b, 0xae, 0x0e, 0xff, 0x38, 0x05, 0xc2, 0x31, 0xa9, 0x8c, 0x74, 0xb4, 0xb3, 0x0f, + 0x21, 0x1d, 0xed, 0x7a, 0x9c, 0x8e, 0xf6, 0x6c, 0xfe, 0x54, 0x67, 0xb8, 0x3f, 0xe4, 0x24, 0xa1, + 0xbd, 0xad, 0x27, 0x8f, 0x7d, 0xb4, 0x8b, 0xc6, 0x22, 0x4b, 0xf0, 0xd8, 0x25, 0x65, 0xec, 0xab, + 0x3c, 0x65, 0xec, 0x63, 0xf9, 0x27, 0x79, 0xf2, 0xba, 0x33, 0x12, 0xc5, 0xd2, 0x7e, 0xa9, 0xb0, + 0x86, 0x2c, 0xac, 0x6e, 0x4e, 0xbf, 0x54, 0x5c, 0xc4, 0x74, 0xbf, 0x14, 0x08, 0xc7, 0xa4, 0xec, + 0xef, 0x2b, 0xc0, 0xf9, 0xee, 0xfb, 0x2d, 0x96, 0xa6, 0x56, 0x63, 0x05, 0x6e, 0x42, 0x9a, 0xca, + 0xdf, 0x6c, 0x31, 0x56, 0xdf, 0x51, 0xda, 0xae, 0xc2, 0xb4, 0xf2, 0x9b, 0x68, 0xba, 0xf5, 0xfd, + 0xf5, 0xf8, 0xe5, 0xab, 0x7c, 0xcd, 0x6b, 0x49, 0x04, 0x9c, 0xae, 0x83, 0x16, 0x61, 0xd2, 0x28, + 0xac, 0x94, 0xc5, 0xdb, 0x4c, 0x89, 0x6f, 0x6b, 0x26, 0x18, 0x27, 0xf1, 0xed, 0x2f, 0x59, 0xf0, + 0x48, 0x4e, 0xa6, 0xb7, 0xbe, 0x83, 0x90, 0x6d, 0xc1, 0x64, 0xdb, 0xac, 0xda, 0x23, 0x6e, 0xa2, + 0x91, 0x4f, 0x4e, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0xf6, 0x9f, 0x59, 0x70, 0xae, 0xab, 0x21, + 0x20, 0xc2, 0x70, 0x66, 0xbb, 0x15, 0x3a, 0xcb, 0x01, 0x69, 0x10, 0x2f, 0x72, 0x9d, 0x66, 0xad, + 0x4d, 0xea, 0x9a, 0x3c, 0x9c, 0x59, 0xd4, 0x5d, 0x5d, 0xab, 0x2d, 0xa6, 0x31, 0x70, 0x4e, 0x4d, + 0xb4, 0x0a, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x02, 0x34, 0xa7, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x23, + 0x30, 0xae, 0x0c, 0x0c, 0xb5, 0x19, 0x67, 0x07, 0x3b, 0xd6, 0x01, 0xd8, 0xc4, 0x5b, 0xba, 0xf4, + 0xeb, 0xbf, 0x7f, 0xfe, 0x7d, 0xbf, 0xf9, 0xfb, 0xe7, 0xdf, 0xf7, 0x3b, 0xbf, 0x7f, 0xfe, 0x7d, + 0xdf, 0x71, 0xff, 0xbc, 0xf5, 0xeb, 0xf7, 0xcf, 0x5b, 0xbf, 0x79, 0xff, 0xbc, 0xf5, 0x3b, 0xf7, + 0xcf, 0x5b, 0xbf, 0x77, 0xff, 0xbc, 0xf5, 0xc5, 0x3f, 0x38, 0xff, 0xbe, 0x4f, 0x15, 0xf6, 0xae, + 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xf2, 0xaa, 0x3b, 0x4d, 0x03, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -11552,6 +11587,20 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Hostname))) @@ -15561,6 +15610,44 @@ func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -17949,6 +18036,49 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AllocateLoadBalancerNodePorts != nil { + i-- + if *m.AllocateLoadBalancerNodePorts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.IPFamilies) > 0 { + for iNdEx := len(m.IPFamilies) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPFamilies[iNdEx]) + copy(dAtA[i:], m.IPFamilies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPFamilies[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.ClusterIPs) > 0 { + for iNdEx := len(m.ClusterIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIPs[iNdEx]) + copy(dAtA[i:], m.ClusterIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIPs[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if m.IPFamilyPolicy != nil { + i -= len(*m.IPFamilyPolicy) + copy(dAtA[i:], *m.IPFamilyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamilyPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } if len(m.TopologyKeys) > 0 { for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.TopologyKeys[iNdEx]) @@ -17960,13 +18090,6 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x82 } } - if m.IPFamily != nil { - i -= len(*m.IPFamily) - copy(dAtA[i:], *m.IPFamily) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) - i-- - dAtA[i] = 0x7a - } if m.SessionAffinityConfig != nil { { size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -18099,6 +18222,20 @@ func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } { size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -21043,6 +21180,12 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -22499,6 +22642,22 @@ func (m 
*PodTemplateSpec) Size() (n int) { return n } +func (m *PortStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PortworxVolumeSource) Size() (n int) { if m == nil { return 0 @@ -23405,16 +23564,31 @@ func (m *ServiceSpec) Size() (n int) { l = m.SessionAffinityConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.IPFamily != nil { - l = len(*m.IPFamily) - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.TopologyKeys) > 0 { for _, s := range m.TopologyKeys { l = len(s) n += 2 + l + sovGenerated(uint64(l)) } } + if m.IPFamilyPolicy != nil { + l = len(*m.IPFamilyPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.ClusterIPs) > 0 { + for _, s := range m.ClusterIPs { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.IPFamilies) > 0 { + for _, s := range m.IPFamilies { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AllocateLoadBalancerNodePorts != nil { + n += 3 + } return n } @@ -23426,6 +23600,12 @@ func (m *ServiceStatus) Size() (n int) { _ = l l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -25156,9 +25336,15 @@ func (this *LoadBalancerIngress) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]PortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "PortStatus", "PortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") return s @@ -26257,6 +26443,18 @@ func (this *PodTemplateSpec) String() string { }, "") return s } +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *PortworxVolumeSource) String() string { if this == nil { return "nil" @@ -26979,8 +27177,11 @@ func (this *ServiceSpec) String() string { `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`, + `ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`, + `IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`, + `AllocateLoadBalancerNodePorts:` + valueToStringGenerated(this.AllocateLoadBalancerNodePorts) + `,`, `}`, }, "") return s @@ -26989,8 +27190,14 @@ func (this *ServiceStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForConditions += "}" s := strings.Join([]string{`&ServiceStatus{`, `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -27413,10 +27620,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -27574,10 +27778,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -27691,10 +27892,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -27778,10 +27976,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28015,10 +28210,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28185,10 +28377,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28322,10 +28511,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28441,10 +28627,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28720,7 +28903,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -28881,10 +29064,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29130,7 +29310,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -29183,10 +29363,7 @@ func (m 
*CSIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29300,10 +29477,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29537,10 +29711,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29774,10 +29945,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29947,10 +30115,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30120,10 +30285,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30193,10 +30355,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30374,10 +30533,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30494,10 +30650,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30614,10 +30767,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30810,7 +30960,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -30938,7 +31088,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -30976,10 +31126,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31083,10 +31230,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31222,10 +31366,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31342,10 +31483,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31555,10 +31693,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31696,10 +31831,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31857,10 +31989,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -32609,10 +32738,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -32713,10 +32839,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -32900,10 +33023,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33061,10 +33181,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33147,10 +33264,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33400,10 +33514,7 @@ func (m 
*ContainerStateTerminated) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33517,10 +33628,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33824,10 +33932,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33896,10 +34001,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33983,10 +34085,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34160,10 +34259,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34267,10 +34363,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34388,10 +34481,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34574,10 +34664,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34743,10 +34830,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34898,10 +34982,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35018,10 +35099,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35138,10 +35216,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35295,10 +35370,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35448,10 +35520,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35645,10 +35714,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35763,10 +35829,7 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -36515,10 +36578,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -36635,10 +36695,7 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -36744,10 +36801,7 @@ func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37278,10 +37332,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37398,10 +37449,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37503,10 +37551,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37620,10 +37665,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37705,10 +37747,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37894,10 +37933,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38177,7 +38213,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -38194,10 +38230,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38477,7 +38510,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -38494,10 +38527,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38611,10 +38641,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38767,10 +38794,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38916,10 +38940,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39086,10 +39107,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39223,10 +39241,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39439,10 +39454,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39556,10 +39568,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39717,10 +39726,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39834,10 +39840,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39952,10 +39955,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40313,10 +40313,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40674,10 +40671,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40811,10 +40805,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40936,10 +40927,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41055,10 +41043,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41252,7 +41237,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41381,7 +41366,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41510,7 +41495,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41639,7 +41624,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return 
err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41768,7 +41753,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41785,10 +41770,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41905,10 +41887,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41992,10 +41971,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42112,10 +42088,7 @@ func (m *List) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42223,16 +42196,47 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, PortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42316,10 +42320,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42401,10 +42402,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42519,10 +42517,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42656,10 +42651,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42808,10 +42800,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43022,10 +43011,7 @@ func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43142,10 +43128,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43227,10 +43210,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43346,10 +43326,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43498,10 +43475,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43615,10 +43589,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43738,10 +43709,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43985,10 +43953,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44074,10 +44039,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44267,10 +44229,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy 
< 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44353,10 +44312,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44473,10 +44429,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44558,10 +44511,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44723,7 +44673,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -44740,10 +44690,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44827,10 +44774,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44976,10 +44920,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -45097,10 +45038,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -45368,10 +45306,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -45533,7 +45468,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -45662,7 +45597,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -45981,10 +45916,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46354,10 +46286,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error 
{ if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46471,10 +46400,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46748,10 +46674,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46900,10 +46823,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47052,10 +46972,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47299,10 +47216,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47419,10 +47333,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47707,10 +47618,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47936,7 +47844,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -47987,10 +47895,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -48106,10 +48011,7 @@ func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -48211,10 +48113,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ 
-48331,10 +48230,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49176,10 +49072,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49341,7 +49234,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -49624,10 +49517,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49773,10 +49663,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49890,10 +49777,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50042,10 +49926,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50163,10 +50044,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50316,10 +50194,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50437,10 +50312,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50602,10 +50474,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50849,10 +50718,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { 
@@ -51000,10 +50866,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51118,10 +50981,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51315,10 +51175,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51400,10 +51257,7 @@ func (m *PodIP) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51520,10 +51374,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51781,10 +51632,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51910,10 +51758,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51995,10 +51840,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52080,10 +51922,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52465,10 +52304,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52554,10 +52390,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52889,7 +52722,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -53729,7 +53562,7 @@ func (m *PodSpec) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -53835,10 +53668,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54318,10 +54148,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54437,10 +54264,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54556,10 +54380,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54676,10 +54497,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54795,10 +54613,141 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54932,10 +54881,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55018,10 +54964,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55201,10 +55144,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55306,10 +55246,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55487,10 +55424,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55594,10 +55528,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55827,10 +55758,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56128,10 +56056,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if 
(iNdEx + skippy) > l { @@ -56429,10 +56354,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56581,10 +56503,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56733,10 +56652,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56947,10 +56863,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57067,10 +56980,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57250,7 +57160,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -57322,10 +57232,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57504,10 +57411,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57654,10 +57558,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57806,10 +57707,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57926,10 +57824,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58091,7 +57986,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > 
postIndex { @@ -58176,10 +58071,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58341,7 +58233,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58470,7 +58362,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58487,10 +58379,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58652,7 +58541,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58781,7 +58670,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58798,10 +58687,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58979,10 +58865,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59332,10 +59215,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59685,10 +59565,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59772,10 +59649,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59921,10 +59795,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60039,10 +59910,7 @@ func (m *SeccompProfile) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60236,7 +60104,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -60395,7 +60263,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -60433,10 +60301,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60540,10 +60405,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60679,10 +60541,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60799,10 +60658,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60940,10 +60796,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61057,10 +60910,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61217,10 +61067,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61571,10 +61418,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61657,10 +61501,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61809,10 +61650,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61984,10 +61822,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62104,10 +61939,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62241,10 +62073,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62361,10 +62190,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62582,10 +62408,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62667,10 +62490,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62864,7 +62684,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -62937,13 +62757,205 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ServiceType(dAtA[iNdEx:postIndex]) + m.Type = ServiceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) } - var stringLen uint64 + m.HealthCheckNodePort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -62953,29 +62965,16 @@ func (m 
*ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -62985,29 +62984,17 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: + m.PublishNotReadyAddresses = bool(v != 0) + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -63017,27 +63004,31 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + if m.SessionAffinityConfig == nil { + m.SessionAffinityConfig = &SessionAffinityConfig{} + } + if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63065,11 +63056,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 10: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63097,11 +63088,12 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalName = 
string(dAtA[iNdEx:postIndex]) + s := IPFamilyPolicyType(dAtA[iNdEx:postIndex]) + m.IPFamilyPolicy = &s iNdEx = postIndex - case 11: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIPs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63129,86 +63121,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) - } - m.HealthCheckNodePort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HealthCheckNodePort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PublishNotReadyAddresses = bool(v != 0) - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SessionAffinityConfig == nil { - m.SessionAffinityConfig = &SessionAffinityConfig{} - } - if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ClusterIPs = append(m.ClusterIPs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 15: + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPFamilies", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63236,14 +63153,13 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := IPFamily(dAtA[iNdEx:postIndex]) - m.IPFamily = &s + m.IPFamilies = append(m.IPFamilies, IPFamily(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocateLoadBalancerNodePorts", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -63253,34 +63169,20 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := bool(v != 0) + m.AllocateLoadBalancerNodePorts = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63357,16 +63259,47 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63452,10 +63385,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63657,10 +63587,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63862,10 +63789,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63979,10 +63903,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64097,10 +64018,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64282,10 +64200,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64483,10 +64398,7 @@ func 
(m *Toleration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64600,10 +64512,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64687,10 +64596,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64859,10 +64765,7 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65009,10 +64912,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65127,10 +65027,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65244,10 +65141,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65478,10 +65372,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65567,10 +65458,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65764,10 +65652,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -66861,10 +66746,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -67042,10 +66924,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -67147,10 +67026,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -67299,10 +67175,7 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 916e2601e6..14c733e9fa 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.core.v1; @@ -718,7 +718,6 @@ message Container { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional Probe startupProbe = 22; @@ -817,6 +816,7 @@ message ContainerPort { // Protocol for port. Must be UDP, TCP, or SCTP. // Defaults to "TCP". // +optional + // +default="TCP" optional string protocol = 4; // What host IP to bind the external port to. @@ -1404,7 +1404,12 @@ message EphemeralVolumeSource { optional bool readOnly = 2; } -// Event is a report of an event somewhere in the cluster. +// Event is a report of an event somewhere in the cluster. Events +// have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -2032,6 +2037,12 @@ message LoadBalancerIngress { // (typically AWS load-balancers) // +optional optional string hostname = 2; + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + repeated PortStatus ports = 4; } // LoadBalancerStatus represents the status of a load-balancer. 
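For illustration, a minimal Go sketch (not part of the vendored patch) of consuming the ports/PortStatus field that this bump adds to LoadBalancerIngress; the sample IP, port, and error string are assumptions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportLoadBalancerPortErrors prints any per-port error recorded on a
// Service's load-balancer ingress entries via the new ports/PortStatus field.
func reportLoadBalancerPortErrors(svc *corev1.Service) {
	for _, ing := range svc.Status.LoadBalancer.Ingress {
		for _, p := range ing.Ports {
			if p.Error != nil {
				fmt.Printf("ingress %s%s, port %d/%s: %s\n",
					ing.IP, ing.Hostname, p.Port, p.Protocol, *p.Error)
			}
		}
	}
}

func main() {
	// Illustrative values only.
	errMsg := "LoadBalancerPortsError"
	svc := &corev1.Service{
		Status: corev1.ServiceStatus{
			LoadBalancer: corev1.LoadBalancerStatus{
				Ingress: []corev1.LoadBalancerIngress{{
					IP: "192.0.2.10",
					Ports: []corev1.PortStatus{{
						Port:     443,
						Protocol: corev1.ProtocolTCP,
						Error:    &errMsg,
					}},
				}},
			},
		},
	}
	reportLoadBalancerPortErrors(svc)
}

Per the field documentation above, the error value is either a built-in CamelCase name or a domain-prefixed value (foo.example.com/CamelCase), so consumers should treat it as opaque text rather than a closed enum.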
@@ -2677,17 +2688,13 @@ message PersistentVolumeClaimSpec { optional string volumeMode = 6; // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // * An existing custom resource that implements data population (Alpha) + // In order to use custom resource types that implement data population, + // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. // +optional optional TypedLocalObjectReference dataSource = 7; } @@ -3340,7 +3347,7 @@ message PodSecurityContext { // volume types which support fsGroup based ownership(and permissions). // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. // +optional optional string fsGroupChangePolicy = 9; @@ -3765,6 +3772,29 @@ message PodTemplateSpec { optional PodSpec spec = 2; } +message PortStatus { + // Port is the port number of the service port of which status is recorded here + optional int32 port = 1; + + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // PortworxVolumeSource represents a Portworx volume resource. message PortworxVolumeSource { // VolumeID uniquely identifies a Portworx volume @@ -3853,6 +3883,7 @@ message Probe { // Represents a projected volume source message ProjectedVolumeSource { // list of volume projections + // +optional repeated VolumeProjection sources = 1; // Mode bits used to set permissions on created files by default. @@ -4722,6 +4753,7 @@ message ServicePort { // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". // Default is TCP. 
+ // +default="TCP" // +optional optional string protocol = 2; @@ -4750,10 +4782,14 @@ message ServicePort { // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional optional int32 nodePort = 5; @@ -4791,30 +4827,68 @@ message ServiceSpec { map selector = 2; // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. + // randomly. If an address is specified manually, is in-range (as per + // system configuration), and is not in use, it will be allocated to the + // service; otherwise creation of the service will fail. This field may not + // be changed through updates unless the type field is also being changed + // to ExternalName (which requires this field to be blank) or the type + // field is being changed from ExternalName (in which case this field may + // optionally be specified, as describe above). Valid values are "None", + // empty string (""), or a valid IP address. Setting this to "None" makes a + // "headless service" (no virtual IP), which is useful when direct endpoint + // connections are preferred and proxying is not required. Only applies to + // types ClusterIP, NodePort, and LoadBalancer. If this field is specified + // when creating a Service of type ExternalName, creation will fail. This + // field will be wiped when updating a Service to type ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional optional string clusterIP = 3; + // ClusterIPs is a list of IP addresses assigned to this service, and are + // usually assigned randomly. If an address is specified manually, is + // in-range (as per system configuration), and is not in use, it will be + // allocated to the service; otherwise creation of the service will fail. 
+ // This field may not be changed through updates unless the type field is + // also being changed to ExternalName (which requires this field to be + // empty) or the type field is being changed from ExternalName (in which + // case this field may optionally be specified, as describe above). Valid + // values are "None", empty string (""), or a valid IP address. Setting + // this to "None" makes a "headless service" (no virtual IP), which is + // useful when direct endpoint connections are preferred and proxying is + // not required. Only applies to types ClusterIP, NodePort, and + // LoadBalancer. If this field is specified when creating a Service of type + // ExternalName, creation will fail. This field will be wiped when updating + // a Service to type ExternalName. If this field is not specified, it will + // be initialized from the clusterIP field. If this field is specified, + // clients must ensure that clusterIPs[0] and clusterIP have the same + // value. + // + // Unless the "IPv6DualStack" feature gate is enabled, this field is + // limited to one value, which must be the same as the clusterIP field. If + // the feature gate is enabled, this field may hold a maximum of two + // entries (dual-stack IPs, in either order). These IPs must correspond to + // the values of the ipFamilies field. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +listType=atomic + // +optional + repeated string clusterIPs = 18; + // type determines how the Service is exposed. Defaults to ClusterIP. Valid // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. + // routes to the same endpoints as the clusterIP. + // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional optional string type = 4; @@ -4850,10 +4924,10 @@ message ServiceSpec { // +optional repeated string loadBalancerSourceRanges = 9; - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. 
No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. + // externalName is the external reference that discovery mechanisms will + // return as an alias for this service (e.g. a DNS CNAME record). No + // proxying will be involved. Must be a lowercase RFC-1123 hostname + // (https://tools.ietf.org/html/rfc1123) and requires Type to be // +optional optional string externalName = 10; @@ -4867,10 +4941,14 @@ message ServiceSpec { optional string externalTrafficPolicy = 11; // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. + // This only applies when type is set to LoadBalancer and + // externalTrafficPolicy is set to Local. If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). // +optional optional int32 healthCheckNodePort = 12; @@ -4889,24 +4967,6 @@ message ServiceSpec { // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. - // IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, - // you can specify ipFamily when creating a ClusterIP Service to determine whether the - // controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when - // creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In - // either case, if you do not specify an ipFamily explicitly, it will default to the - // cluster's primary IP family. - // This field is part of an alpha feature, and you should not make any assumptions about its - // semantics other than those described above. In particular, you should not assume that it - // can (or cannot) be changed after creation time; that it can only have the values "IPv4" - // and "IPv6"; or that its current value on a given Service correctly reflects the current - // state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service - // is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in - // the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an - // irrelevant value anyway.) - // +optional - optional string ipFamily = 15; - // topologyKeys is a preference-order list of topology keys which // implementations of services should use to preferentially sort endpoints // when accessing this Service, it can not be used at the same time as @@ -4919,8 +4979,51 @@ message ServiceSpec { // The special value "*" may be used to mean "any topology". This catch-all // value, if used, only makes sense as the last value in the list. // If this is not specified or empty, no topology constraints will be applied. 
+ // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. // +optional repeated string topologyKeys = 16; + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. + // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + repeated string ipFamilies = 19; + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + optional string ipFamilyPolicy = 17; + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It may be + // set to "false" if the cluster load-balancer does not rely on NodePorts. + // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer + // and will be cleared if the type is changed to any other type. + // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +optional + optional bool allocateLoadBalancerNodePorts = 20; } // ServiceStatus represents the current status of a service. @@ -4929,6 +5032,14 @@ message ServiceStatus { // if one is present. // +optional optional LoadBalancerStatus loadBalancer = 1; + + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go index 5bc9cd5bf1..4e249d03ab 100644 --- a/vendor/k8s.io/api/core/v1/resource.go +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -21,44 +21,39 @@ import ( ) // Returns string version of ResourceName. 
-func (self ResourceName) String() string { - return string(self) +func (rn ResourceName) String() string { + return string(rn) } -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} +// Cpu returns the Cpu limit if specified. +func (rl *ResourceList) Cpu() *resource.Quantity { + return rl.Name(ResourceCPU, resource.DecimalSI) } -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} +// Memory returns the Memory limit if specified. +func (rl *ResourceList) Memory() *resource.Quantity { + return rl.Name(ResourceMemory, resource.BinarySI) } -// Returns the Storage limit if specified. -func (self *ResourceList) Storage() *resource.Quantity { - if val, ok := (*self)[ResourceStorage]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} +// Storage returns the Storage limit if specified. +func (rl *ResourceList) Storage() *resource.Quantity { + return rl.Name(ResourceStorage, resource.BinarySI) } -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} +// Pods returns the list of pods +func (rl *ResourceList) Pods() *resource.Quantity { + return rl.Name(ResourcePods, resource.DecimalSI) +} + +// StorageEphemeral returns the list of ephemeral storage volumes, if any +func (rl *ResourceList) StorageEphemeral() *resource.Quantity { + return rl.Name(ResourceEphemeralStorage, resource.BinarySI) } -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { +// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format. +func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity { + if val, ok := (*rl)[name]; ok { return &val } - return &resource.Quantity{} + return &resource.Quantity{Format: defaultFormat} } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index f3ec52e71c..769b70a951 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -489,17 +489,13 @@ type PersistentVolumeClaimSpec struct { // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // * An existing custom resource that implements data population (Alpha) + // In order to use custom resource types that implement data population, + // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. 
- // If the specified data source is not supported, the volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. // +optional DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` } @@ -1615,6 +1611,7 @@ type ServiceAccountTokenProjection struct { // Represents a projected volume source type ProjectedVolumeSource struct { // list of volume projections + // +optional Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` // Mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. @@ -1840,6 +1837,7 @@ type ContainerPort struct { // Protocol for port. Must be UDP, TCP, or SCTP. // Defaults to "TCP". // +optional + // +default="TCP" Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` // What host IP to bind the external port to. // +optional @@ -2287,7 +2285,6 @@ type Container struct { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` @@ -3298,7 +3295,7 @@ type PodSecurityContext struct { // volume types which support fsGroup based ownership(and permissions). // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. // +optional FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` // The seccomp options to use by the containers in this pod. @@ -3943,12 +3940,26 @@ const ( ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" ) +// These are the valid conditions of a service. +const ( + // LoadBalancerPortsError represents the condition of the requested ports + // on the cloud load balancer instance. + LoadBalancerPortsError = "LoadBalancerPortsError" +) + // ServiceStatus represents the current status of a service. type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, // if one is present. // +optional LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // LoadBalancerStatus represents the status of a load-balancer. 
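For illustration, a minimal Go sketch (not part of the vendored patch) of checking the new ServiceStatus.Conditions field and the LoadBalancerPortsError constant introduced just above, assuming the vendored k8s.io/api and k8s.io/apimachinery packages from this bump; the reason string is an illustrative assumption:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hasTrueCondition reports whether the Service carries a condition of the
// given type with status "True", using the new ServiceStatus.Conditions field.
func hasTrueCondition(svc *corev1.Service, condType string) bool {
	for _, c := range svc.Status.Conditions {
		if c.Type == condType && c.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	svc := &corev1.Service{
		Status: corev1.ServiceStatus{
			Conditions: []metav1.Condition{{
				Type:   corev1.LoadBalancerPortsError,
				Status: metav1.ConditionTrue,
				Reason: "PortsNotSupported", // illustrative reason only
			}},
		},
	}
	fmt.Println("load-balancer ports error:", hasTrueCondition(svc, corev1.LoadBalancerPortsError))
}

Because conditions use the standard metav1.Condition type with listType=map keyed on type, controllers can merge-patch individual entries without clobbering conditions set by other components.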
@@ -3971,10 +3982,21 @@ type LoadBalancerIngress struct { // (typically AWS load-balancers) // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` } +const ( + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 +) + // IPFamily represents the IP Family (IPv4 or IPv6). This type is used -// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). type IPFamily string const ( @@ -3982,8 +4004,29 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" - // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service - MaxServiceTopologyKeys = 16 +) + +// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +type IPFamilyPolicyType string + +const ( + // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. + // The IPFamily assigned is based on the default IPFamily used by the cluster + // or as identified by service.spec.ipFamilies field + IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack" + // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when + // the cluster is configured for dual-stack. If the cluster is not configured + // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not + // set in service.spec.ipFamilies then the service will be assigned the default IPFamily + // configured on the cluster + IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack" + // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using + // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The + // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If + // service.spec.ipFamilies was not provided then it will be assigned according to how they are + // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative + // IPFamily will be added by apiserver + IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack" ) // ServiceSpec describes the attributes that a user creates on a service. @@ -4007,30 +4050,68 @@ type ServiceSpec struct { Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. + // randomly. 
If an address is specified manually, is in-range (as per + // system configuration), and is not in use, it will be allocated to the + // service; otherwise creation of the service will fail. This field may not + // be changed through updates unless the type field is also being changed + // to ExternalName (which requires this field to be blank) or the type + // field is being changed from ExternalName (in which case this field may + // optionally be specified, as describe above). Valid values are "None", + // empty string (""), or a valid IP address. Setting this to "None" makes a + // "headless service" (no virtual IP), which is useful when direct endpoint + // connections are preferred and proxying is not required. Only applies to + // types ClusterIP, NodePort, and LoadBalancer. If this field is specified + // when creating a Service of type ExternalName, creation will fail. This + // field will be wiped when updating a Service to type ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + // ClusterIPs is a list of IP addresses assigned to this service, and are + // usually assigned randomly. If an address is specified manually, is + // in-range (as per system configuration), and is not in use, it will be + // allocated to the service; otherwise creation of the service will fail. + // This field may not be changed through updates unless the type field is + // also being changed to ExternalName (which requires this field to be + // empty) or the type field is being changed from ExternalName (in which + // case this field may optionally be specified, as describe above). Valid + // values are "None", empty string (""), or a valid IP address. Setting + // this to "None" makes a "headless service" (no virtual IP), which is + // useful when direct endpoint connections are preferred and proxying is + // not required. Only applies to types ClusterIP, NodePort, and + // LoadBalancer. If this field is specified when creating a Service of type + // ExternalName, creation will fail. This field will be wiped when updating + // a Service to type ExternalName. If this field is not specified, it will + // be initialized from the clusterIP field. If this field is specified, + // clients must ensure that clusterIPs[0] and clusterIP have the same + // value. + // + // Unless the "IPv6DualStack" feature gate is enabled, this field is + // limited to one value, which must be the same as the clusterIP field. If + // the feature gate is enabled, this field may hold a maximum of two + // entries (dual-stack IPs, in either order). These IPs must correspond to + // the values of the ipFamilies field. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +listType=atomic + // +optional + ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"` + // type determines how the Service is exposed. Defaults to ClusterIP. Valid // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. 
If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. + // routes to the same endpoints as the clusterIP. + // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -4066,10 +4147,10 @@ type ServiceSpec struct { // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. + // externalName is the external reference that discovery mechanisms will + // return as an alias for this service (e.g. a DNS CNAME record). No + // proxying will be involved. Must be a lowercase RFC-1123 hostname + // (https://tools.ietf.org/html/rfc1123) and requires Type to be // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` @@ -4083,10 +4164,14 @@ type ServiceSpec struct { ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. + // This only applies when type is set to LoadBalancer and + // externalTrafficPolicy is set to Local. If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). 
// +optional HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` @@ -4105,24 +4190,6 @@ type ServiceSpec struct { // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. - // IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, - // you can specify ipFamily when creating a ClusterIP Service to determine whether the - // controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when - // creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In - // either case, if you do not specify an ipFamily explicitly, it will default to the - // cluster's primary IP family. - // This field is part of an alpha feature, and you should not make any assumptions about its - // semantics other than those described above. In particular, you should not assume that it - // can (or cannot) be changed after creation time; that it can only have the values "IPv4" - // and "IPv6"; or that its current value on a given Service correctly reflects the current - // state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service - // is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in - // the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an - // irrelevant value anyway.) - // +optional - IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` - // topologyKeys is a preference-order list of topology keys which // implementations of services should use to preferentially sort endpoints // when accessing this Service, it can not be used at the same time as @@ -4135,8 +4202,54 @@ type ServiceSpec struct { // The special value "*" may be used to mean "any topology". This catch-all // value, if used, only makes sense as the last value in the list. // If this is not specified or empty, no topology constraints will be applied. + // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. // +optional TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + + // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. + // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. 
+ // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"` + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It may be + // set to "false" if the cluster load-balancer does not rely on NodePorts. + // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer + // and will be cleared if the type is changed to any other type. + // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +optional + AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"` } // ServicePort contains information on service's port. @@ -4151,6 +4264,7 @@ type ServicePort struct { // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". // Default is TCP. + // +default="TCP" // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` @@ -4179,10 +4293,14 @@ type ServicePort struct { // +optional TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). 
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` @@ -5275,7 +5393,12 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Event is a report of an event somewhere in the cluster. +// Event is a report of an event somewhere in the cluster. Events +// have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -6104,3 +6227,26 @@ const ( // and data streams for a single forwarded connection PortForwardRequestIDHeader = "requestID" ) + +// PortStatus represents the error condition of a service port + +type PortStatus struct { + // Port is the port number of the service port of which status is recorded here + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 61832b8159..c58d8ac566 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -339,7 +339,7 @@ var map_Container = map[string]string{ "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -637,7 +637,7 @@ func (EphemeralVolumeSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", + "": "Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", @@ -953,6 +953,7 @@ var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } func (LoadBalancerIngress) SwaggerDoc() map[string]string { @@ -1310,7 +1311,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", + "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1602,7 +1603,7 @@ var map_PodSecurityContext = map[string]string{ "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.", "seccompProfile": "The seccomp options to use by the containers in this pod.", } @@ -1723,6 +1724,16 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string { return map_PodTemplateSpec } +var map_PortStatus = map[string]string{ + "port": "Port is the port number of the service port of which status is recorded here", + "protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (PortStatus) SwaggerDoc() map[string]string { + return map_PortStatus +} + var map_PortworxVolumeSource = map[string]string{ "": "PortworxVolumeSource represents a Portworx volume resource.", "volumeID": "VolumeID uniquely identifies a Portworx volume", @@ -2209,7 +2220,7 @@ var map_ServicePort = map[string]string{ "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -2226,22 +2237,25 @@ func (ServiceProxyOptions) SwaggerDoc() map[string]string { } var map_ServiceSpec = map[string]string{ - "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", - "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. 
Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", - "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", - "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", - "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", - "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, you can specify ipFamily when creating a ClusterIP Service to determine whether the controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In either case, if you do not specify an ipFamily explicitly, it will default to the cluster's primary IP family. This field is part of an alpha feature, and you should not make any assumptions about its semantics other than those described above. 
In particular, you should not assume that it can (or cannot) be changed after creation time; that it can only have the values \"IPv4\" and \"IPv6\"; or that its current value on a given Service correctly reflects the current state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an irrelevant value anyway.)", - "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. 
This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. 
This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be", + "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", + "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", + "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. 
This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.", + "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. 
This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2251,6 +2265,7 @@ func (ServiceSpec) SwaggerDoc() map[string]string { var map_ServiceStatus = map[string]string{ "": "ServiceStatus represents the current status of a service.", "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", + "conditions": "Current service state", } func (ServiceStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 22aa55b911..a506f17f65 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -19,10 +19,16 @@ package v1 const ( LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" - LabelZoneRegionStable = "topology.kubernetes.io/region" + LabelFailureDomainBetaZone = "failure-domain.beta.kubernetes.io/zone" + LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region" + LabelTopologyZone = "topology.kubernetes.io/zone" + LabelTopologyRegion = "topology.kubernetes.io/region" + + // Legacy names for compat. + LabelZoneFailureDomain = LabelFailureDomainBetaZone // deprecated, remove after 1.20 + LabelZoneRegion = LabelFailureDomainBetaRegion // deprecated, remove after 1.20 + LabelZoneFailureDomainStable = LabelTopologyZone + LabelZoneRegionStable = LabelTopologyRegion LabelInstanceType = "beta.kubernetes.io/instance-type" LabelInstanceTypeStable = "node.kubernetes.io/instance-type" diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 445c7c04a9..b868f9ba5b 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -2144,6 +2144,13 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2163,7 +2170,9 @@ func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { if in.Ingress != nil { in, out := &in.Ingress, &out.Ingress *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -4079,6 +4088,27 @@ func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortStatus) DeepCopyInto(out *PortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus. +func (in *PortStatus) DeepCopy() *PortStatus { + if in == nil { + return nil + } + out := new(PortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { *out = *in @@ -5270,6 +5300,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { (*out)[key] = val } } + if in.ClusterIPs != nil { + in, out := &in.ClusterIPs, &out.ClusterIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.ExternalIPs != nil { in, out := &in.ExternalIPs, &out.ExternalIPs *out = make([]string, len(*in)) @@ -5285,16 +5320,26 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } - if in.IPFamily != nil { - in, out := &in.IPFamily, &out.IPFamily - *out = new(IPFamily) - **out = **in - } if in.TopologyKeys != nil { in, out := &in.TopologyKeys, &out.TopologyKeys *out = make([]string, len(*in)) copy(*out, *in) } + if in.IPFamilies != nil { + in, out := &in.IPFamilies, &out.IPFamilies + *out = make([]IPFamily, len(*in)) + copy(*out, *in) + } + if in.IPFamilyPolicy != nil { + in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy + *out = new(IPFamilyPolicyType) + **out = **in + } + if in.AllocateLoadBalancerNodePorts != nil { + in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts + *out = new(bool) + **out = **in + } return } @@ -5312,6 +5357,13 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec { func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { *out = *in in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go index 45c4382cf5..c7db73cb71 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -200,54 +200,58 @@ func init() { } var fileDescriptor_772f83c5b34e07a5 = []byte{ - // 746 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, - 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca, - 0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba, - 0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0, - 0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a, - 0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10, - 0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64, - 0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24, - 0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97, - 0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88, - 0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca, - 0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c, - 0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58, - 0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23, - 0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb, - 0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 
0x4b, 0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13, - 0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2, - 0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68, - 0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c, - 0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e, - 0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3, - 0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88, - 0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, - 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51, - 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a, - 0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7, - 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55, - 0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00, - 0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3, - 0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a, - 0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37, - 0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2, - 0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31, - 0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b, - 0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3, - 0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35, - 0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3, - 0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79, - 0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31, - 0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f, - 0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5, - 0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b, - 0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96, - 0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, - 0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, - 0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00, + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8f, 0xe3, 0x44, + 0x10, 0x8d, 0x27, 0x13, 0xd6, 0xee, 0xec, 0x88, 0xdd, 0x16, 0x87, 0x68, 0x00, 0x7b, 0x14, 0x84, + 0x88, 0x34, 0xd0, 0x26, 0x23, 0x40, 0x2b, 0x38, 0x8d, 0x61, 0xf9, 0x90, 0x60, 0x19, 0x7a, 0xe7, + 0x80, 0x10, 0x07, 0x7a, 0xec, 0x5a, 0xc7, 0x24, 0x76, 0x5b, 0xdd, 0x9d, 0x48, 0xb9, 0xf1, 0x0f, + 0xe0, 0x47, 0x21, 0x34, 0xc7, 0x3d, 0xee, 0xc9, 0x62, 0xbc, 
0x12, 0x3f, 0x62, 0x4f, 0xa8, 0xdb, + 0x9f, 0x43, 0x80, 0xcd, 0xcd, 0xfd, 0xaa, 0xde, 0xab, 0x7a, 0xe5, 0x2a, 0xf4, 0xf9, 0xf2, 0x81, + 0x24, 0x09, 0xf7, 0x97, 0xeb, 0x2b, 0x10, 0x19, 0x28, 0x90, 0xfe, 0x06, 0xb2, 0x88, 0x0b, 0xbf, + 0x0e, 0xb0, 0x3c, 0xf1, 0xa3, 0x44, 0x86, 0x7c, 0x03, 0x62, 0xeb, 0x6f, 0xe6, 0x6c, 0x95, 0x2f, + 0xd8, 0xdc, 0x8f, 0x21, 0x03, 0xc1, 0x14, 0x44, 0x24, 0x17, 0x5c, 0x71, 0xfc, 0x66, 0x95, 0x4e, + 0x58, 0x9e, 0x90, 0x36, 0x9d, 0x34, 0xe9, 0xc7, 0xef, 0xc5, 0x89, 0x5a, 0xac, 0xaf, 0x48, 0xc8, + 0x53, 0x3f, 0xe6, 0x31, 0xf7, 0x0d, 0xeb, 0x6a, 0xfd, 0xc4, 0xbc, 0xcc, 0xc3, 0x7c, 0x55, 0x6a, + 0xc7, 0xd3, 0x5e, 0xf1, 0x90, 0x0b, 0xf0, 0x37, 0x3b, 0x15, 0x8f, 0x3f, 0xe8, 0x72, 0x52, 0x16, + 0x2e, 0x92, 0x4c, 0xf7, 0x97, 0x2f, 0x63, 0x0d, 0x48, 0x3f, 0x05, 0xc5, 0xfe, 0x8d, 0xe5, 0xff, + 0x17, 0x4b, 0xac, 0x33, 0x95, 0xa4, 0xb0, 0x43, 0xf8, 0xe8, 0x65, 0x04, 0x19, 0x2e, 0x20, 0x65, + 0xff, 0xe4, 0x4d, 0xff, 0x1a, 0x22, 0xfb, 0x61, 0x16, 0xe5, 0x3c, 0xc9, 0x14, 0x3e, 0x45, 0x0e, + 0x8b, 0x22, 0x01, 0x52, 0x82, 0x9c, 0x58, 0x27, 0xc3, 0x99, 0x13, 0x1c, 0x95, 0x85, 0xe7, 0x9c, + 0x37, 0x20, 0xed, 0xe2, 0x18, 0x10, 0x0a, 0x79, 0x16, 0x25, 0x2a, 0xe1, 0x99, 0x9c, 0x1c, 0x9c, + 0x58, 0xb3, 0xf1, 0xd9, 0x9c, 0xfc, 0xef, 0x7c, 0x49, 0x53, 0xe9, 0xd3, 0x96, 0x18, 0xe0, 0xeb, + 0xc2, 0x1b, 0x94, 0x85, 0x87, 0x3a, 0x8c, 0xf6, 0x84, 0xf1, 0x0c, 0xd9, 0x0b, 0x2e, 0x55, 0xc6, + 0x52, 0x98, 0x0c, 0x4f, 0xac, 0x99, 0x13, 0xdc, 0x2d, 0x0b, 0xcf, 0xfe, 0xb2, 0xc6, 0x68, 0x1b, + 0xc5, 0x17, 0xc8, 0x51, 0x4c, 0xc4, 0xa0, 0x28, 0x3c, 0x99, 0x1c, 0x9a, 0x7e, 0xde, 0xea, 0xf7, + 0xa3, 0xff, 0x10, 0xd9, 0xcc, 0xc9, 0xb7, 0x57, 0x3f, 0x43, 0xa8, 0x93, 0x40, 0x40, 0x16, 0x42, + 0x65, 0xf1, 0xb2, 0x61, 0xd2, 0x4e, 0x04, 0x87, 0xc8, 0x56, 0x3c, 0xe7, 0x2b, 0x1e, 0x6f, 0x27, + 0xa3, 0x93, 0xe1, 0x6c, 0x7c, 0xf6, 0xe1, 0x9e, 0x06, 0xc9, 0x65, 0xcd, 0x7b, 0x98, 0x29, 0xb1, + 0x0d, 0xee, 0xd5, 0x26, 0xed, 0x06, 0xa6, 0xad, 0xb0, 0x36, 0x98, 0xf1, 0x08, 0x1e, 0x69, 0x83, + 0xaf, 0x74, 0x06, 0x1f, 0xd5, 0x18, 0x6d, 0xa3, 0xc7, 0x9f, 0xa0, 0xa3, 0x5b, 0xb2, 0xf8, 0x1e, + 0x1a, 0x2e, 0x61, 0x3b, 0xb1, 0x34, 0x8b, 0xea, 0x4f, 0xfc, 0x1a, 0x1a, 0x6d, 0xd8, 0x6a, 0x0d, + 0xe6, 0x7f, 0x38, 0xb4, 0x7a, 0x7c, 0x7c, 0xf0, 0xc0, 0x9a, 0xfe, 0x6a, 0x21, 0xbc, 0x3b, 0x7e, + 0xec, 0xa1, 0x91, 0x00, 0x16, 0x55, 0x22, 0x76, 0xe0, 0x94, 0x85, 0x37, 0xa2, 0x1a, 0xa0, 0x15, + 0x8e, 0xdf, 0x46, 0x77, 0x24, 0x88, 0x4d, 0x92, 0xc5, 0x46, 0xd3, 0x0e, 0xc6, 0x65, 0xe1, 0xdd, + 0x79, 0x5c, 0x41, 0xb4, 0x89, 0xe1, 0x39, 0x1a, 0x2b, 0x10, 0x69, 0x92, 0x31, 0xa5, 0x53, 0x87, + 0x26, 0xf5, 0xd5, 0xb2, 0xf0, 0xc6, 0x97, 0x1d, 0x4c, 0xfb, 0x39, 0xd3, 0x3f, 0x2c, 0x74, 0xb7, + 0xe9, 0xe8, 0x82, 0x0b, 0x85, 0xdf, 0x40, 0x87, 0xe6, 0x37, 0x1b, 0x3f, 0x81, 0x5d, 0x16, 0xde, + 0xa1, 0x99, 0x80, 0x41, 0xf1, 0x17, 0xc8, 0x36, 0x2b, 0x1b, 0xf2, 0x55, 0xe5, 0x2e, 0x38, 0xd5, + 0x73, 0xba, 0xa8, 0xb1, 0x17, 0x85, 0xf7, 0xfa, 0xee, 0x39, 0x92, 0x26, 0x4c, 0x5b, 0xb2, 0x2e, + 0x93, 0x73, 0xa1, 0x4c, 0x8f, 0xa3, 0xaa, 0x8c, 0x2e, 0x4f, 0x0d, 0xaa, 0x8d, 0xb0, 0x3c, 0x6f, + 0x68, 0x66, 0x8f, 0x9c, 0xca, 0xc8, 0x79, 0x07, 0xd3, 0x7e, 0xce, 0xf4, 0xf9, 0x01, 0x3a, 0x6a, + 0x8c, 0x3c, 0x5e, 0x25, 0x21, 0xe0, 0x9f, 0x90, 0xad, 0x2f, 0x3b, 0x62, 0x8a, 0x19, 0x37, 0xe3, + 0xb3, 0xf7, 0x7b, 0x8b, 0xd3, 0x1e, 0x28, 0xc9, 0x97, 0xb1, 0x06, 0x24, 0xd1, 0xd9, 0xdd, 0x6e, + 0x7e, 0x03, 0x8a, 0x75, 0x87, 0xd1, 0x61, 0xb4, 0x55, 0xc5, 0x9f, 0xa1, 0x71, 0x7d, 0x8a, 0x97, + 0xdb, 0x1c, 0xea, 0x36, 0xa7, 0x35, 0x65, 0x7c, 0xde, 0x85, 0x5e, 0xdc, 0x7e, 0xd2, 
0x3e, 0x0d, + 0x7f, 0x8f, 0x1c, 0xa8, 0x1b, 0xd7, 0x27, 0xac, 0x37, 0xfc, 0x9d, 0x3d, 0x37, 0x3c, 0xb8, 0x5f, + 0x17, 0x73, 0x1a, 0x44, 0xd2, 0x4e, 0x0c, 0x5f, 0xa0, 0x91, 0x1e, 0xa7, 0x9c, 0x0c, 0x8d, 0xea, + 0xe9, 0x9e, 0xaa, 0xfa, 0x47, 0x04, 0x47, 0xb5, 0xf2, 0x48, 0xbf, 0x24, 0xad, 0x84, 0xa6, 0xbf, + 0x5b, 0xe8, 0xfe, 0xad, 0x29, 0x7f, 0x9d, 0x48, 0x85, 0x7f, 0xdc, 0x99, 0x34, 0xd9, 0x6f, 0xd2, + 0x9a, 0x6d, 0xe6, 0xdc, 0xde, 0x66, 0x83, 0xf4, 0xa6, 0xfc, 0x1d, 0x1a, 0x25, 0x0a, 0xd2, 0x66, + 0x36, 0xef, 0xee, 0xe9, 0xc2, 0xb4, 0xd7, 0xd9, 0xf8, 0x4a, 0x4b, 0xd0, 0x4a, 0x29, 0x20, 0xd7, + 0x37, 0xee, 0xe0, 0xe9, 0x8d, 0x3b, 0x78, 0x76, 0xe3, 0x0e, 0x7e, 0x29, 0x5d, 0xeb, 0xba, 0x74, + 0xad, 0xa7, 0xa5, 0x6b, 0x3d, 0x2b, 0x5d, 0xeb, 0xcf, 0xd2, 0xb5, 0x7e, 0x7b, 0xee, 0x0e, 0x7e, + 0xb0, 0x1b, 0xcd, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x03, 0x95, 0x92, 0xa5, 0xfa, 0x06, 0x00, + 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -270,6 +274,13 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } if len(m.Topology) > 0 { keysForTopology := make([]string, 0, len(m.Topology)) for k := range m.Topology { @@ -355,6 +366,26 @@ func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.Ready != nil { i-- if *m.Ready { @@ -571,6 +602,10 @@ func (m *Endpoint) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -583,6 +618,12 @@ func (m *EndpointConditions) Size() (n int) { if m.Ready != nil { n += 2 } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } return n } @@ -678,6 +719,7 @@ func (this *Endpoint) String() string { `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") return s @@ -688,6 +730,8 @@ func (this *EndpointConditions) String() string { } s := strings.Join([]string{`&EndpointConditions{`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1031,7 +1075,7 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1042,16 +1086,46 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } m.Topology[mapkey] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1116,16 +1190,55 @@ func (m *EndpointConditions) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1294,10 +1407,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1480,10 +1590,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1600,10 +1707,7 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto index 2cbbdcdb01..4b66a6c57b 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.discovery.v1alpha1; @@ -45,8 +45,8 @@ message Endpoint { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. 
+ // fungible (e.g. multiple A values in DNS). Must be lowercase and pass + // DNS label (RFC 1123) validation. // +optional optional string hostname = 3; @@ -67,8 +67,15 @@ message Endpoint { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional map topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + optional string nodeName = 6; } // EndpointConditions represents the current condition of an endpoint. @@ -76,9 +83,25 @@ message EndpointConditions { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool terminating = 3; } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go index cf50b501cf..34b706ea89 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/types.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -86,8 +86,8 @@ type Endpoint struct { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass + // DNS label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // targetRef is a reference to a Kubernetes object that represents this @@ -106,8 +106,14 @@ type Endpoint struct { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. 
This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` } // EndpointConditions represents the current condition of an endpoint. @@ -115,9 +121,25 @@ type EndpointConditions struct { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go index 1ba2d60d4a..f6c983689a 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -31,9 +31,10 @@ var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", - "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", - "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. 
This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", } func (Endpoint) SwaggerDoc() map[string]string { @@ -41,8 +42,10 @@ func (Endpoint) SwaggerDoc() map[string]string { } var map_EndpointConditions = map[string]string{ - "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. 
This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", } func (EndpointConditions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go index c72f64acfd..13e54d5007 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -51,6 +51,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = val } } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } return } @@ -72,6 +77,16 @@ func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { *out = new(bool) **out = **in } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go index ce0046c519..1fdb8ddb77 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -200,54 +200,57 @@ func init() { } var fileDescriptor_ece80bbc872d519b = []byte{ - // 745 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6b, 0xdb, 0x48, - 0x14, 0xb6, 0xe2, 0x88, 0x95, 0xc6, 0x31, 0x9b, 0x0c, 0x7b, 0x30, 0xde, 0x20, 0x19, 0x2f, 0x2c, - 0x66, 0x43, 0xa4, 0x75, 0xc8, 0x2e, 0x61, 0xf7, 0x14, 0xed, 0x86, 0xb6, 0xd0, 0x36, 0x66, 0x1a, - 0x28, 0x94, 0x1e, 0x3a, 0x96, 0x26, 0xb2, 0x6a, 0x5b, 0x23, 0x34, 0x63, 0x83, 0x6f, 0xfd, 0x13, - 0xfa, 0xf7, 0xf4, 0x5a, 0x28, 0x39, 0xe6, 0x98, 0x93, 0xa8, 0xd5, 0xff, 0x22, 0xa7, 0x32, 0xa3, - 0x5f, 0x76, 0xdd, 0x1f, 0xbe, 0xcd, 0x7c, 0xf3, 0xbe, 0xef, 0xbd, 0xf7, 0xcd, 0x7b, 0xe0, 0x62, - 0x7c, 0xc6, 0xac, 0x80, 0xda, 0xe3, 0xd9, 0x90, 0xc4, 0x21, 0xe1, 0x84, 0xd9, 0x73, 0x12, 0x7a, - 0x34, 0xb6, 0xf3, 0x07, 0x1c, 0x05, 0xb6, 0x17, 0x30, 0x97, 0xce, 0x49, 0xbc, 0xb0, 0xe7, 0xfd, - 0x21, 0xe1, 0xb8, 0x6f, 0xfb, 0x24, 0x24, 0x31, 0xe6, 0xc4, 0xb3, 0xa2, 0x98, 0x72, 0x0a, 0x0f, - 0xb3, 0x68, 0x0b, 0x47, 0x81, 0x55, 0x46, 0x5b, 0x79, 0x74, 0xfb, 0xd8, 0x0f, 0xf8, 0x68, 0x36, - 0xb4, 0x5c, 0x3a, 0xb5, 0x7d, 0xea, 0x53, 0x5b, 0x92, 0x86, 0xb3, 0x6b, 0x79, 0x93, 0x17, 0x79, - 0xca, 0xc4, 0xda, 0xdd, 0x95, 0xd4, 0x2e, 0x8d, 0x89, 0x3d, 0xdf, 0x48, 0xd8, 0x3e, 0xad, 0x62, - 0xa6, 0xd8, 0x1d, 0x05, 0xa1, 0xa8, 0x2e, 0x1a, 0xfb, 0x02, 0x60, 0xf6, 0x94, 0x70, 0xfc, 0x35, - 0x96, 0xfd, 0x2d, 0x56, 0x3c, 0x0b, 0x79, 0x30, 0x25, 0x1b, 0x84, 0xbf, 0x7f, 0x44, 0x60, 0xee, - 0x88, 0x4c, 0xf1, 0x97, 0xbc, 0xee, 0xbb, 0x3a, 0xd0, 0x2e, 0x42, 0x2f, 0xa2, 0x41, 0xc8, 0xe1, - 0x11, 0xd0, 0xb1, 0xe7, 0xc5, 0x84, 0x31, 0xc2, 0x5a, 0x4a, 0xa7, 0xde, 0xd3, 0x9d, 0x66, 0x9a, - 0x98, 0xfa, 0x79, 0x01, 0xa2, 0xea, 0x1d, 0x7a, 0x00, 0xb8, 0x34, 0xf4, 0x02, 0x1e, 0xd0, 0x90, - 0xb5, 0x76, 0x3a, 0x4a, 0xaf, 0x71, 0xf2, 0xa7, 0xf5, 0x3d, 0x7b, 0xad, 0x22, 0xd1, 0x7f, 0x25, - 0xcf, 0x81, 0x37, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5a, 0xd1, 0x85, 0x3d, 0xa0, 0x8d, - 0x28, 0xe3, 0x21, 0x9e, 0x92, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x67, 0x2f, 0x4d, 0x4c, 0xed, 0x61, - 0x8e, 0xa1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x71, 0xec, 0x13, 0x8e, 0xc8, 0x75, 0x6b, 0x57, 0x96, - 0xf3, 0xdb, 0x6a, 0x39, 0xe2, 0x83, 0xac, 0x79, 0xdf, 0xba, 0x1c, 0xbe, 0x26, 
0xae, 0x08, 0x22, - 0x31, 0x09, 0x5d, 0x92, 0x75, 0x78, 0x55, 0x30, 0x51, 0x25, 0x02, 0x87, 0x40, 0xe3, 0x34, 0xa2, - 0x13, 0xea, 0x2f, 0x5a, 0x6a, 0xa7, 0xde, 0x6b, 0x9c, 0x9c, 0x6e, 0xd7, 0x9f, 0x75, 0x95, 0xd3, - 0x2e, 0x42, 0x1e, 0x2f, 0x9c, 0xfd, 0xbc, 0x47, 0xad, 0x80, 0x51, 0xa9, 0xdb, 0xfe, 0x17, 0x34, - 0xd7, 0x82, 0xe1, 0x3e, 0xa8, 0x8f, 0xc9, 0xa2, 0xa5, 0x88, 0x5e, 0x91, 0x38, 0xc2, 0x5f, 0x80, - 0x3a, 0xc7, 0x93, 0x19, 0x91, 0x1e, 0xeb, 0x28, 0xbb, 0xfc, 0xb3, 0x73, 0xa6, 0x74, 0xff, 0x02, - 0x70, 0xd3, 0x52, 0x68, 0x02, 0x35, 0x26, 0xd8, 0xcb, 0x34, 0x34, 0x47, 0x4f, 0x13, 0x53, 0x45, - 0x02, 0x40, 0x19, 0xde, 0xfd, 0xa0, 0x80, 0xbd, 0x82, 0x37, 0xa0, 0x31, 0x87, 0x87, 0x60, 0x57, - 0x1a, 0x2c, 0x93, 0x3a, 0x5a, 0x9a, 0x98, 0xbb, 0x4f, 0x85, 0xb9, 0x12, 0x85, 0x0f, 0x80, 0x26, - 0x67, 0xc5, 0xa5, 0x93, 0xac, 0x04, 0xe7, 0x48, 0x34, 0x33, 0xc8, 0xb1, 0xfb, 0xc4, 0xfc, 0x75, - 0x73, 0x0f, 0xac, 0xe2, 0x19, 0x95, 0x64, 0x91, 0x26, 0xa2, 0x31, 0x97, 0xff, 0xa8, 0x66, 0x69, - 0x44, 0x7a, 0x24, 0x51, 0xd8, 0x07, 0x0d, 0x1c, 0x45, 0x05, 0x4d, 0xfe, 0xa0, 0xee, 0xfc, 0x9c, - 0x26, 0x66, 0xe3, 0xbc, 0x82, 0xd1, 0x6a, 0x4c, 0x77, 0xb9, 0x03, 0x9a, 0x45, 0x23, 0xcf, 0x26, - 0x81, 0x4b, 0xe0, 0x2b, 0xa0, 0x89, 0x95, 0xf2, 0x30, 0xc7, 0xb2, 0x9b, 0xf5, 0x91, 0x2c, 0x37, - 0xc3, 0x8a, 0xc6, 0xbe, 0x00, 0x98, 0x25, 0xa2, 0xab, 0xa9, 0x78, 0x42, 0x38, 0xae, 0x46, 0xb2, - 0xc2, 0x50, 0xa9, 0x0a, 0xff, 0x07, 0x8d, 0x7c, 0x07, 0xae, 0x16, 0x11, 0xc9, 0xcb, 0xec, 0xe6, - 0x94, 0xc6, 0x79, 0xf5, 0x74, 0xbf, 0x7e, 0x45, 0xab, 0x34, 0xf8, 0x1c, 0xe8, 0x24, 0x2f, 0x5c, - 0xec, 0x8e, 0x98, 0xad, 0xdf, 0xb7, 0x9b, 0x2d, 0xe7, 0x20, 0xcf, 0xa5, 0x17, 0x08, 0x43, 0x95, - 0x16, 0xbc, 0x04, 0xaa, 0x70, 0x93, 0xb5, 0xea, 0x52, 0xf4, 0x8f, 0xed, 0x44, 0xc5, 0x37, 0x38, - 0xcd, 0x5c, 0x58, 0x15, 0x37, 0x86, 0x32, 0x9d, 0xee, 0x7b, 0x05, 0x1c, 0xac, 0x79, 0xfc, 0x38, - 0x60, 0x1c, 0xbe, 0xdc, 0xf0, 0xd9, 0xda, 0xce, 0x67, 0xc1, 0x96, 0x2e, 0x97, 0x4b, 0x51, 0x20, - 0x2b, 0x1e, 0x0f, 0x80, 0x1a, 0x70, 0x32, 0x2d, 0x9c, 0x39, 0xda, 0xae, 0x09, 0x59, 0x5d, 0xd5, - 0xc5, 0x23, 0xa1, 0x80, 0x32, 0x21, 0xe7, 0xf8, 0x66, 0x69, 0xd4, 0x6e, 0x97, 0x46, 0xed, 0x6e, - 0x69, 0xd4, 0xde, 0xa4, 0x86, 0x72, 0x93, 0x1a, 0xca, 0x6d, 0x6a, 0x28, 0x77, 0xa9, 0xa1, 0x7c, - 0x4c, 0x0d, 0xe5, 0xed, 0x27, 0xa3, 0xf6, 0xe2, 0xa7, 0x5c, 0xf2, 0x73, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x29, 0x1a, 0xa2, 0x6f, 0x6d, 0x06, 0x00, 0x00, + // 798 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8f, 0xe3, 0x44, + 0x10, 0x8d, 0x27, 0x63, 0xc6, 0xee, 0xec, 0x88, 0xdd, 0x16, 0x87, 0x68, 0x58, 0xd9, 0xa3, 0x20, + 0x50, 0xc4, 0x68, 0x6d, 0x66, 0xb5, 0x42, 0x2b, 0x38, 0x8d, 0x61, 0x04, 0x48, 0xb0, 0x1b, 0xf5, + 0x46, 0x42, 0x42, 0x1c, 0xe8, 0xd8, 0xb5, 0x8e, 0x49, 0xec, 0xb6, 0xba, 0x3b, 0x91, 0x72, 0xe3, + 0x1f, 0xc0, 0x7f, 0x42, 0x42, 0x73, 0xdc, 0xe3, 0x9e, 0x2c, 0x62, 0xf8, 0x15, 0x7b, 0x42, 0xdd, + 0xfe, 0x4a, 0x08, 0x1f, 0xb9, 0x75, 0xbf, 0xaa, 0xf7, 0xaa, 0x5e, 0x75, 0x17, 0xba, 0x5d, 0x3c, + 0x15, 0x5e, 0xc2, 0xfc, 0xc5, 0x6a, 0x06, 0x3c, 0x03, 0x09, 0xc2, 0x5f, 0x43, 0x16, 0x31, 0xee, + 0xd7, 0x01, 0x9a, 0x27, 0x7e, 0x94, 0x88, 0x90, 0xad, 0x81, 0x6f, 0xfc, 0xf5, 0xf5, 0x0c, 0x24, + 0xbd, 0xf6, 0x63, 0xc8, 0x80, 0x53, 0x09, 0x91, 0x97, 0x73, 0x26, 0x19, 0x7e, 0x58, 0x65, 0x7b, + 0x34, 0x4f, 0xbc, 0x36, 0xdb, 0xab, 0xb3, 0x2f, 0x1e, 0xc5, 0x89, 0x9c, 0xaf, 0x66, 0x5e, 0xc8, + 0x52, 0x3f, 0x66, 0x31, 0xf3, 0x35, 0x69, 0xb6, 0x7a, 0xa9, 0x6f, 0xfa, 0xa2, 0x4f, 0x95, 0xd8, + 
0xc5, 0x68, 0xa7, 0x74, 0xc8, 0x38, 0xf8, 0xeb, 0x83, 0x82, 0x17, 0x4f, 0xba, 0x9c, 0x94, 0x86, + 0xf3, 0x24, 0x53, 0xdd, 0xe5, 0x8b, 0x58, 0x01, 0xc2, 0x4f, 0x41, 0xd2, 0x7f, 0x62, 0xf9, 0xff, + 0xc6, 0xe2, 0xab, 0x4c, 0x26, 0x29, 0x1c, 0x10, 0x3e, 0xfe, 0x3f, 0x82, 0x08, 0xe7, 0x90, 0xd2, + 0xbf, 0xf3, 0x46, 0x7f, 0xf6, 0x91, 0x75, 0x9b, 0x45, 0x39, 0x4b, 0x32, 0x89, 0xaf, 0x90, 0x4d, + 0xa3, 0x88, 0x83, 0x10, 0x20, 0x86, 0xc6, 0x65, 0x7f, 0x6c, 0x07, 0xe7, 0x65, 0xe1, 0xda, 0x37, + 0x0d, 0x48, 0xba, 0x38, 0x8e, 0x10, 0x0a, 0x59, 0x16, 0x25, 0x32, 0x61, 0x99, 0x18, 0x9e, 0x5c, + 0x1a, 0xe3, 0xc1, 0xe3, 0x8f, 0xbc, 0xff, 0x1a, 0xaf, 0xd7, 0x14, 0xfa, 0xac, 0xe5, 0x05, 0xf8, + 0xae, 0x70, 0x7b, 0x65, 0xe1, 0xa2, 0x0e, 0x23, 0x3b, 0xba, 0x78, 0x8c, 0xac, 0x39, 0x13, 0x32, + 0xa3, 0x29, 0x0c, 0xfb, 0x97, 0xc6, 0xd8, 0x0e, 0xee, 0x95, 0x85, 0x6b, 0x7d, 0x59, 0x63, 0xa4, + 0x8d, 0xe2, 0x09, 0xb2, 0x25, 0xe5, 0x31, 0x48, 0x02, 0x2f, 0x87, 0xa7, 0xba, 0x9d, 0xf7, 0x76, + 0xdb, 0x51, 0x0f, 0xe4, 0xad, 0xaf, 0xbd, 0xe7, 0xb3, 0x1f, 0x21, 0x54, 0x49, 0xc0, 0x21, 0x0b, + 0xa1, 0x72, 0x38, 0x6d, 0x98, 0xa4, 0x13, 0xc1, 0x33, 0x64, 0x49, 0x96, 0xb3, 0x25, 0x8b, 0x37, + 0x43, 0xf3, 0xb2, 0x3f, 0x1e, 0x3c, 0x7e, 0x72, 0x9c, 0x3f, 0x6f, 0x5a, 0xd3, 0x6e, 0x33, 0xc9, + 0x37, 0xc1, 0xfd, 0xda, 0xa3, 0xd5, 0xc0, 0xa4, 0xd5, 0x55, 0xfe, 0x32, 0x16, 0xc1, 0x33, 0xe5, + 0xef, 0xad, 0xce, 0xdf, 0xb3, 0x1a, 0x23, 0x6d, 0xf4, 0xe2, 0x53, 0x74, 0xbe, 0x27, 0x8b, 0xef, + 0xa3, 0xfe, 0x02, 0x36, 0x43, 0x43, 0xb1, 0x88, 0x3a, 0xe2, 0x77, 0x90, 0xb9, 0xa6, 0xcb, 0x15, + 0xe8, 0xd7, 0xb0, 0x49, 0x75, 0xf9, 0xe4, 0xe4, 0xa9, 0x31, 0xfa, 0xd9, 0x40, 0xf8, 0x70, 0xfa, + 0xd8, 0x45, 0x26, 0x07, 0x1a, 0x55, 0x22, 0x56, 0x60, 0x97, 0x85, 0x6b, 0x12, 0x05, 0x90, 0x0a, + 0xc7, 0xef, 0xa3, 0x33, 0x01, 0x7c, 0x9d, 0x64, 0xb1, 0xd6, 0xb4, 0x82, 0x41, 0x59, 0xb8, 0x67, + 0x2f, 0x2a, 0x88, 0x34, 0x31, 0x7c, 0x8d, 0x06, 0x12, 0x78, 0x9a, 0x64, 0x54, 0xaa, 0xd4, 0xbe, + 0x4e, 0x7d, 0xbb, 0x2c, 0xdc, 0xc1, 0xb4, 0x83, 0xc9, 0x6e, 0xce, 0xe8, 0x37, 0x03, 0xdd, 0x6b, + 0x3a, 0x9a, 0x30, 0x2e, 0xf1, 0x43, 0x74, 0xaa, 0x5f, 0x59, 0xfb, 0x09, 0xac, 0xb2, 0x70, 0x4f, + 0xf5, 0x04, 0x34, 0x8a, 0xbf, 0x40, 0x96, 0xfe, 0xb0, 0x21, 0x5b, 0x56, 0xee, 0x82, 0x2b, 0x35, + 0xa7, 0x49, 0x8d, 0xbd, 0x29, 0xdc, 0x77, 0x0f, 0x97, 0xd1, 0x6b, 0xc2, 0xa4, 0x25, 0xab, 0x32, + 0x39, 0xe3, 0x52, 0xf7, 0x68, 0x56, 0x65, 0x54, 0x79, 0xa2, 0x51, 0x65, 0x84, 0xe6, 0x79, 0x43, + 0xd3, 0xdf, 0xc8, 0xae, 0x8c, 0xdc, 0x74, 0x30, 0xd9, 0xcd, 0x19, 0x6d, 0x4f, 0xd0, 0x79, 0x63, + 0xe4, 0xc5, 0x32, 0x09, 0x01, 0xff, 0x80, 0x2c, 0xb5, 0xd7, 0x11, 0x95, 0x54, 0xbb, 0xd9, 0xdf, + 0x8b, 0x76, 0x3d, 0xbd, 0x7c, 0x11, 0x2b, 0x40, 0x78, 0x2a, 0xbb, 0xfb, 0x9a, 0xdf, 0x80, 0xa4, + 0xdd, 0x5e, 0x74, 0x18, 0x69, 0x55, 0xf1, 0xe7, 0x68, 0x50, 0x2f, 0xe2, 0x74, 0x93, 0x43, 0xdd, + 0xe6, 0xa8, 0xa6, 0x0c, 0x6e, 0xba, 0xd0, 0x9b, 0xfd, 0x2b, 0xd9, 0xa5, 0xe1, 0x6f, 0x91, 0x0d, + 0x75, 0xe3, 0x6a, 0x81, 0xd5, 0x07, 0xff, 0xe0, 0xb8, 0x0f, 0x1e, 0x3c, 0xa8, 0x6b, 0xd9, 0x0d, + 0x22, 0x48, 0xa7, 0x85, 0x9f, 0x23, 0x53, 0x4d, 0x53, 0x0c, 0xfb, 0x5a, 0xf4, 0xc3, 0xe3, 0x44, + 0xd5, 0x33, 0x04, 0xe7, 0xb5, 0xb0, 0xa9, 0x6e, 0x82, 0x54, 0x3a, 0xa3, 0x5f, 0x0d, 0xf4, 0x60, + 0x6f, 0xc6, 0x5f, 0x27, 0x42, 0xe2, 0xef, 0x0f, 0xe6, 0xec, 0x1d, 0x37, 0x67, 0xc5, 0xd6, 0x53, + 0x6e, 0x37, 0xb3, 0x41, 0x76, 0x66, 0x3c, 0x41, 0x66, 0x22, 0x21, 0x6d, 0x26, 0x73, 0x75, 0x9c, + 0x09, 0xdd, 0x5d, 0xe7, 0xe2, 0x2b, 0xa5, 0x40, 0x2a, 0xa1, 0xe0, 0xd1, 0xdd, 0xd6, 0xe9, 0xbd, + 0xda, 0x3a, 0xbd, 0xd7, 
0x5b, 0xa7, 0xf7, 0x53, 0xe9, 0x18, 0x77, 0xa5, 0x63, 0xbc, 0x2a, 0x1d, + 0xe3, 0x75, 0xe9, 0x18, 0xbf, 0x97, 0x8e, 0xf1, 0xcb, 0x1f, 0x4e, 0xef, 0xbb, 0xb3, 0x5a, 0xf2, + 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x35, 0xe6, 0xf5, 0xf2, 0x06, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -270,6 +273,13 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } if len(m.Topology) > 0 { keysForTopology := make([]string, 0, len(m.Topology)) for k := range m.Topology { @@ -355,6 +365,26 @@ func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.Ready != nil { i-- if *m.Ready { @@ -571,6 +601,10 @@ func (m *Endpoint) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -583,6 +617,12 @@ func (m *EndpointConditions) Size() (n int) { if m.Ready != nil { n += 2 } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } return n } @@ -678,6 +718,7 @@ func (this *Endpoint) String() string { `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") return s @@ -688,6 +729,8 @@ func (this *EndpointConditions) String() string { } s := strings.Join([]string{`&EndpointConditions{`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1031,7 +1074,7 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1042,16 +1085,46 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } m.Topology[mapkey] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1116,16 
+1189,55 @@ func (m *EndpointConditions) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1294,10 +1406,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1480,10 +1589,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1600,10 +1706,7 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index adf10c0e48..e5d21caadf 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.discovery.v1beta1; @@ -45,8 +45,8 @@ message Endpoint { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS + // Label (RFC 1123) validation. // +optional optional string hostname = 3; @@ -67,8 +67,15 @@ message Endpoint { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional map topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. 
This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + optional string nodeName = 6; } // EndpointConditions represents the current condition of an endpoint. @@ -76,9 +83,25 @@ message EndpointConditions { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool terminating = 3; } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 5cafea7479..e14088e8b2 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -60,12 +60,6 @@ type EndpointSlice struct { type AddressType string const ( - // AddressTypeIP represents an IP Address. - // This address type has been deprecated and has been replaced by the IPv4 - // and IPv6 adddress types. New resources with this address type will be - // considered invalid. This will be fully removed in 1.18. - // +deprecated - AddressTypeIP = AddressType("IP") // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) // AddressTypeIPv6 represents an IPv6 Address. @@ -88,8 +82,8 @@ type Endpoint struct { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS + // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // targetRef is a reference to a Kubernetes object that represents this @@ -108,8 +102,14 @@ type Endpoint struct { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. 
+ // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` } // EndpointConditions represents the current condition of an endpoint. @@ -117,9 +117,25 @@ type EndpointConditions struct { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index d67cc72146..d48b93d8b5 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -31,9 +31,10 @@ var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", - "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", - "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. 
This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", } func (Endpoint) SwaggerDoc() map[string]string { @@ -41,8 +42,10 @@ func (Endpoint) SwaggerDoc() map[string]string { } var map_EndpointConditions = map[string]string{ - "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. 
This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", } func (EndpointConditions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go index 8490ec73fb..7076553d29 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -51,6 +51,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = val } } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } return } @@ -72,6 +77,16 @@ func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { *out = new(bool) **out = **in } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/events/v1/generated.pb.go b/vendor/k8s.io/api/events/v1/generated.pb.go index 717137cffe..70ad588a62 100644 --- a/vendor/k8s.io/api/events/v1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1/generated.pb.go @@ -1077,10 +1077,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1197,10 +1194,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1302,10 +1296,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto index 18e3d0182e..690c99e4c5 100644 --- a/vendor/k8s.io/api/events/v1/generated.proto +++ b/vendor/k8s.io/api/events/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.events.v1; @@ -30,8 +30,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "v1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. @@ -43,22 +47,18 @@ message Event { // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. // This field cannot be empty for new Events. 
- // +optional optional string reportingController = 4; // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. // This field cannot be empty for new Events and it can have at most 128 characters. - // +optional optional string reportingInstance = 5; // action is what action was taken/failed regarding to the regarding object. It is machine-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. optional string action = 6; // reason is why the action was taken. It is human-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. optional string reason = 7; // regarding contains the object this Event is about. In most cases it's an Object reporting controller @@ -80,7 +80,7 @@ message Event { // type is the type of this event (Normal, Warning), new types could be added in the future. // It is machine-readable. - // +optional + // This field cannot be empty for new Events. optional string type = 11; // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go index 07ede55422..4bf715872a 100644 --- a/vendor/k8s.io/api/events/v1/types.go +++ b/vendor/k8s.io/api/events/v1/types.go @@ -25,10 +25,15 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // eventTime is the time when this Event was first observed. It is required. EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` @@ -39,22 +44,18 @@ type Event struct { // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. // This field cannot be empty for new Events. - // +optional ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. // This field cannot be empty for new Events and it can have at most 128 characters. - // +optional ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // action is what action was taken/failed regarding to the regarding object. It is machine-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // reason is why the action was taken. It is human-readable. - // This field can have at most 128 characters. 
- // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` // regarding contains the object this Event is about. In most cases it's an Object reporting controller @@ -76,7 +77,7 @@ type Event struct { // type is the type of this event (Normal, Warning), new types could be added in the future. // It is machine-readable. - // +optional + // This field cannot be empty for new Events. Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index e0467436ed..7255727bb4 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -28,17 +28,17 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "eventTime": "eventTime is the time when this Event was first observed. It is required.", "series": "series is data about the Event series this event represents or nil if it's a singleton Event.", "reportingController": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.", "reportingInstance": "reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.", - "action": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field can have at most 128 characters.", - "reason": "reason is why the action was taken. It is human-readable. This field can have at most 128 characters.", + "action": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.", + "reason": "reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.", "regarding": "regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", "related": "related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", "note": "note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", - "type": "type is the type of this event (Normal, Warning), new types could be added in the future. 
It is machine-readable.", + "type": "type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.", "deprecatedSource": "deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.", "deprecatedFirstTimestamp": "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", "deprecatedLastTimestamp": "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index 3709ef633a..d92411bc8a 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -1077,10 +1077,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1197,10 +1194,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1302,10 +1296,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index 79bde87c43..90b57d8d08 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.events.v1beta1; @@ -30,8 +30,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "v1beta1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index e2ed214b06..796e56ea7d 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -27,10 +27,15 @@ import ( // +k8s:prerelease-lifecycle-gen:deprecated=1.22 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. 
Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // eventTime is the time when this Event was first observed. It is required. EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 8c987f8996..7f8e162cdd 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "eventTime": "eventTime is the time when this Event was first observed. It is required.", "series": "series is data about the Event series this event represents or nil if it's a singleton Event.", "reportingController": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. 
This field cannot be empty for new Events.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index bd37f432c4..f1d82c0fdc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -6750,10 +6750,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6835,10 +6832,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6940,10 +6934,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7092,10 +7083,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7306,10 +7294,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7426,10 +7411,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7639,10 +7621,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7898,10 +7877,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8019,10 +7995,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8171,10 +8144,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8418,10 +8388,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8538,10 +8505,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8733,7 +8697,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -8783,10 +8747,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9073,10 +9034,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9294,10 +9252,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9415,10 +9370,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9534,10 +9486,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9685,10 +9634,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9772,10 +9718,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9863,10 +9806,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9954,10 +9894,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10071,10 +10008,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + 
skippy) > l { @@ -10223,10 +10157,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10377,10 +10308,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10497,10 +10425,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10615,10 +10540,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10704,10 +10626,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10894,10 +10813,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10980,10 +10896,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11097,10 +11010,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11216,10 +11126,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11337,10 +11244,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11458,10 +11362,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11578,10 +11479,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11739,10 +11637,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11861,10 +11756,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -12047,10 +11939,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -12166,10 +12055,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -12286,10 +12172,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13045,10 +12928,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13197,10 +13077,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13411,10 +13288,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13531,10 +13405,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13692,10 +13563,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13874,10 +13742,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13946,10 +13811,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 
{ + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14035,10 +13897,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14160,10 +14019,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14279,10 +14135,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14398,10 +14251,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14516,10 +14366,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14637,10 +14484,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14789,10 +14633,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14861,10 +14702,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -15043,7 +14881,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -15092,10 +14930,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -15211,10 +15046,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index a81cce680c..a4ca5b563a 100644 --- 
a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.extensions.v1beta1; diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go index 31b8b5d535..a3d4d0c607 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go index 86c8612049..7f0687ac04 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go @@ -2513,10 +2513,7 @@ func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2665,10 +2662,7 @@ func (m *FlowSchema) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2879,10 +2873,7 @@ func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2999,10 +2990,7 @@ func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3174,10 +3162,7 @@ func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3261,10 +3246,7 @@ func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3346,10 +3328,7 @@ func (m *GroupSubject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3467,10 +3446,7 @@ func (m *LimitResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3572,10 +3548,7 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3689,10 +3662,7 @@ func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3844,10 +3814,7 @@ func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3996,10 +3963,7 @@ func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4210,10 +4174,7 @@ func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4330,10 +4291,7 @@ func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4415,10 +4373,7 @@ func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4536,10 +4491,7 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4623,10 +4575,7 @@ func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4733,10 +4682,7 @@ func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4934,10 +4880,7 @@ func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5051,10 +4994,7 @@ func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5244,10 +5184,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5329,10 +5266,7 @@ func (m *UserSubject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 0801dd6c12..7b19a273e5 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.flowcontrol.v1alpha1; diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index a67c6dd027..1e9701fc36 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -51,9 +51,19 @@ const ( FlowSchemaMaxMatchingPrecedence int32 = 10000 ) +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -74,6 +84,10 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -321,6 +335,10 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -340,6 +358,10 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..4152aa2a91 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,121 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchemaList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfigurationList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go new file mode 100644 index 0000000000..50897b7eb5 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1beta1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1beta1 // import "k8s.io/api/flowcontrol/v1beta1" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go new file mode 100644 index 0000000000..cb06fe5e77 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go @@ -0,0 +1,5367 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) 
XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] 
+ n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) 
XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func (*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{15} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{16} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{17} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{18} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m *ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{19} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{20} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{21} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaSpec") + 
proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_80171c2a4e3669de) +} + +var fileDescriptor_80171c2a4e3669de = []byte{ + // 1494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcb, 0x73, 0xdb, 0x44, + 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, + 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, + 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, + 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, + 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, + 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x19, 0xae, 0xee, 0x5f, 0xa6, 0x9a, 0xe5, + 0x94, 0xf7, 0xfd, 0x0a, 0xf1, 0x6c, 0xc2, 0x08, 0x2d, 0xb7, 0x88, 0x5d, 0x75, 0xbc, 0xb2, 0x54, + 0x18, 0xae, 0x55, 0xde, 0x6b, 0x38, 0x07, 0xa6, 0x63, 0x33, 0xcf, 0x69, 0x94, 0x5b, 0xe7, 0x2a, + 0x84, 0x19, 0xe7, 0xca, 0x35, 0x62, 0x13, 0xcf, 0x60, 0xa4, 0xaa, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, + 0x0c, 0xec, 0x35, 0xc3, 0xb5, 0xb4, 0x2e, 0x7b, 0x4d, 0xda, 0xaf, 0x9e, 0xa9, 0x59, 0xac, 0xee, + 0x57, 0x34, 0xd3, 0x69, 0x96, 0x6b, 0x4e, 0xcd, 0x29, 0x0b, 0xb7, 0x8a, 0xbf, 0x27, 0xbe, 0xc4, + 0x87, 0xf8, 0x15, 0xc0, 0xad, 0x5e, 0x88, 0xc3, 0x37, 0x0d, 0xb3, 0x6e, 0xd9, 0xc4, 0x3b, 0x2c, + 0xbb, 0xfb, 0x35, 0x2e, 0xa0, 0xe5, 0x26, 0x61, 0x46, 0xb9, 0xd5, 0x97, 0xc4, 0x6a, 0x79, 0x98, + 0x97, 0xe7, 0xdb, 0xcc, 0x6a, 0x92, 0x3e, 
0x87, 0xb7, 0xd2, 0x1c, 0xa8, 0x59, 0x27, 0x4d, 0xa3, + 0xd7, 0x4f, 0xbd, 0x03, 0x2b, 0x1f, 0x37, 0x9c, 0x83, 0x2b, 0x16, 0x65, 0x96, 0x5d, 0xf3, 0x2d, + 0x5a, 0x27, 0xde, 0x16, 0x61, 0x75, 0xa7, 0x8a, 0x3e, 0x80, 0x2c, 0x3b, 0x74, 0x49, 0x41, 0x59, + 0x53, 0x5e, 0xcb, 0xeb, 0xa7, 0x1e, 0xb5, 0x4b, 0x13, 0x9d, 0x76, 0x29, 0x7b, 0xeb, 0xd0, 0x25, + 0xcf, 0xda, 0xa5, 0xe3, 0x43, 0xdc, 0xb8, 0x1a, 0x0b, 0x47, 0xf5, 0xfb, 0x0c, 0x00, 0xb7, 0xda, + 0x15, 0xa1, 0xd1, 0x3d, 0x98, 0xe1, 0xe5, 0x56, 0x0d, 0x66, 0x08, 0xcc, 0xd9, 0xf3, 0x67, 0xb5, + 0xb8, 0xd7, 0x51, 0xd6, 0x9a, 0xbb, 0x5f, 0xe3, 0x02, 0xaa, 0x71, 0x6b, 0xad, 0x75, 0x4e, 0xbb, + 0x51, 0xb9, 0x4f, 0x4c, 0xb6, 0x45, 0x98, 0xa1, 0x23, 0x99, 0x05, 0xc4, 0x32, 0x1c, 0xa1, 0xa2, + 0x1d, 0xc8, 0x52, 0x97, 0x98, 0x85, 0x8c, 0x40, 0xd7, 0xb4, 0xd1, 0x27, 0xa9, 0xc5, 0xb9, 0xed, + 0xba, 0xc4, 0xd4, 0xe7, 0xc2, 0x0a, 0xf9, 0x17, 0x16, 0x48, 0xe8, 0x0b, 0x98, 0xa2, 0xcc, 0x60, + 0x3e, 0x2d, 0x4c, 0xf6, 0x65, 0x9c, 0x86, 0x29, 0xfc, 0xf4, 0x05, 0x89, 0x3a, 0x15, 0x7c, 0x63, + 0x89, 0xa7, 0x3e, 0xc9, 0xc0, 0x72, 0x6c, 0xbc, 0xe1, 0xd8, 0x55, 0x8b, 0x59, 0x8e, 0x8d, 0xde, + 0x4d, 0x74, 0xfd, 0x64, 0x4f, 0xd7, 0x57, 0x06, 0xb8, 0xc4, 0x1d, 0x47, 0x6f, 0x47, 0xe9, 0x66, + 0x84, 0xfb, 0x89, 0x64, 0xf0, 0x67, 0xed, 0xd2, 0x62, 0xe4, 0x96, 0xcc, 0x07, 0xb5, 0x00, 0x35, + 0x0c, 0xca, 0x6e, 0x79, 0x86, 0x4d, 0x03, 0x58, 0xab, 0x49, 0x64, 0xd5, 0x6f, 0x8c, 0x77, 0x4e, + 0xdc, 0x43, 0x5f, 0x95, 0x21, 0xd1, 0x66, 0x1f, 0x1a, 0x1e, 0x10, 0x01, 0xbd, 0x0a, 0x53, 0x1e, + 0x31, 0xa8, 0x63, 0x17, 0xb2, 0x22, 0xe5, 0xa8, 0x5f, 0x58, 0x48, 0xb1, 0xd4, 0xa2, 0xd7, 0x61, + 0xba, 0x49, 0x28, 0x35, 0x6a, 0xa4, 0x90, 0x13, 0x86, 0x8b, 0xd2, 0x70, 0x7a, 0x2b, 0x10, 0xe3, + 0x50, 0xaf, 0xfe, 0xa2, 0xc0, 0x42, 0xdc, 0xa7, 0x4d, 0x8b, 0x32, 0x74, 0xb7, 0x8f, 0x7b, 0xda, + 0x78, 0x35, 0x71, 0x6f, 0xc1, 0xbc, 0x25, 0x19, 0x6e, 0x26, 0x94, 0x74, 0xf1, 0xee, 0x06, 0xe4, + 0x2c, 0x46, 0x9a, 0xbc, 0xeb, 0x93, 0x3d, 0xed, 0x4a, 0x21, 0x89, 0x3e, 0x2f, 0x61, 0x73, 0xd7, + 0x38, 0x00, 0x0e, 0x70, 0xd4, 0xbf, 0x26, 0xbb, 0x2b, 0xe0, 0x7c, 0x44, 0x3f, 0x29, 0xb0, 0xea, + 0x7a, 0x96, 0xe3, 0x59, 0xec, 0x70, 0x93, 0xb4, 0x48, 0x63, 0xc3, 0xb1, 0xf7, 0xac, 0x9a, 0xef, + 0x19, 0xbc, 0x95, 0xb2, 0xa8, 0x8d, 0xb4, 0xc8, 0x3b, 0x43, 0x11, 0x30, 0xd9, 0x23, 0x1e, 0xb1, + 0x4d, 0xa2, 0xab, 0x32, 0xa5, 0xd5, 0x11, 0xc6, 0x23, 0x52, 0x41, 0x9f, 0x02, 0x6a, 0x1a, 0x8c, + 0x77, 0xb4, 0xb6, 0xe3, 0x11, 0x93, 0x54, 0x39, 0xaa, 0x20, 0x64, 0x2e, 0x66, 0xc7, 0x56, 0x9f, + 0x05, 0x1e, 0xe0, 0x85, 0xbe, 0x55, 0x60, 0xb9, 0xda, 0x3f, 0x64, 0x24, 0x2f, 0x2f, 0x8d, 0xd3, + 0xe8, 0x01, 0x33, 0x4a, 0x5f, 0xe9, 0xb4, 0x4b, 0xcb, 0x03, 0x14, 0x78, 0x50, 0x30, 0x74, 0x17, + 0x72, 0x9e, 0xdf, 0x20, 0xb4, 0x90, 0x15, 0xc7, 0x9b, 0x1a, 0x75, 0xc7, 0x69, 0x58, 0xe6, 0x21, + 0xe6, 0x2e, 0x9f, 0x5b, 0xac, 0xbe, 0xeb, 0x8b, 0x59, 0x45, 0xe3, 0xb3, 0x16, 0x2a, 0x1c, 0x80, + 0xaa, 0x0f, 0x61, 0xa9, 0x77, 0x68, 0xa0, 0x1a, 0x80, 0x19, 0xde, 0x53, 0x5a, 0x50, 0x44, 0xd8, + 0x37, 0xc7, 0x67, 0x55, 0x74, 0xc7, 0xe3, 0x79, 0x19, 0x89, 0x28, 0xee, 0x82, 0x56, 0xcf, 0xc2, + 0xdc, 0x55, 0xcf, 0xf1, 0x5d, 0x99, 0x23, 0x5a, 0x83, 0xac, 0x6d, 0x34, 0xc3, 0xe9, 0x13, 0x4d, + 0xc4, 0x6d, 0xa3, 0x49, 0xb0, 0xd0, 0xa8, 0x3f, 0x2a, 0x30, 0xbf, 0x69, 0x35, 0x2d, 0x86, 0x09, + 0x75, 0x1d, 0x9b, 0x12, 0x74, 0x31, 0x31, 0xb1, 0x4e, 0xf4, 0x4c, 0xac, 0x23, 0x09, 0xe3, 0xae, + 0x59, 0xf5, 0x25, 0x4c, 0x3f, 0xf0, 0x89, 0x6f, 0xd9, 0x35, 0x39, 0xaf, 0x2f, 0xa4, 0x15, 0x78, + 0x33, 0x30, 0x4f, 0xb0, 0x4d, 0x9f, 0xe5, 0x23, 0x40, 0x6a, 0x70, 
0x88, 0xa8, 0xfe, 0xad, 0xc0, + 0x09, 0x11, 0x98, 0x54, 0x87, 0xb3, 0x18, 0xdd, 0x85, 0x82, 0x41, 0xa9, 0xef, 0x91, 0xea, 0x86, + 0x63, 0x9b, 0xbe, 0xc7, 0xf9, 0x7f, 0xb8, 0x5b, 0x37, 0x3c, 0x42, 0x45, 0x35, 0x39, 0x7d, 0x4d, + 0x56, 0x53, 0x58, 0x1f, 0x62, 0x87, 0x87, 0x22, 0xa0, 0xfb, 0x30, 0xdf, 0xe8, 0xae, 0x5d, 0x96, + 0x79, 0x26, 0xad, 0xcc, 0x44, 0xc3, 0xf4, 0x63, 0x32, 0x83, 0x64, 0xd3, 0x71, 0x12, 0x5a, 0x3d, + 0x80, 0x63, 0xdb, 0xfc, 0x0e, 0x53, 0xc7, 0xf7, 0x4c, 0x12, 0x13, 0x10, 0x95, 0x20, 0xd7, 0x22, + 0x5e, 0x25, 0x20, 0x51, 0x5e, 0xcf, 0x73, 0xfa, 0x7d, 0xc6, 0x05, 0x38, 0x90, 0xa3, 0xf7, 0x60, + 0xd1, 0x8e, 0x3d, 0x6f, 0xe3, 0x4d, 0x5a, 0x98, 0x12, 0xa6, 0xcb, 0x9d, 0x76, 0x69, 0x71, 0x3b, + 0xa9, 0xc2, 0xbd, 0xb6, 0x6a, 0x3b, 0x03, 0x2b, 0x43, 0xf8, 0x8e, 0x6e, 0xc3, 0x0c, 0x95, 0xbf, + 0x25, 0x87, 0x4f, 0xa6, 0xd5, 0x2e, 0x7d, 0xe3, 0x69, 0x1b, 0x82, 0xe1, 0x08, 0x0a, 0x39, 0x30, + 0xef, 0xc9, 0x14, 0x44, 0x4c, 0x39, 0x75, 0xcf, 0xa7, 0x61, 0xf7, 0x77, 0x27, 0x6e, 0x2e, 0xee, + 0x06, 0xc4, 0x49, 0x7c, 0xf4, 0x10, 0x96, 0xba, 0xca, 0x0e, 0x62, 0x4e, 0x8a, 0x98, 0x17, 0xd3, + 0x62, 0x0e, 0x3c, 0x14, 0xbd, 0x20, 0xc3, 0x2e, 0x6d, 0xf7, 0xc0, 0xe2, 0xbe, 0x40, 0xea, 0x6f, + 0x19, 0x18, 0x31, 0x88, 0x5f, 0xc2, 0x52, 0x75, 0x2f, 0xb1, 0x54, 0xbd, 0xff, 0xfc, 0x2f, 0xcc, + 0xd0, 0x25, 0xab, 0xde, 0xb3, 0x64, 0x7d, 0xf8, 0x02, 0x31, 0x46, 0x2f, 0x5d, 0xff, 0x64, 0xe0, + 0x95, 0xe1, 0xce, 0xf1, 0x12, 0x76, 0x3d, 0x31, 0xd2, 0x2e, 0xf5, 0x8c, 0xb4, 0x93, 0x63, 0x40, + 0xfc, 0xbf, 0x94, 0xf5, 0x2c, 0x65, 0xbf, 0x2b, 0x50, 0x1c, 0xde, 0xb7, 0x97, 0xb0, 0xa4, 0x7d, + 0x95, 0x5c, 0xd2, 0xde, 0x79, 0x7e, 0x92, 0x0d, 0x59, 0xda, 0xae, 0x8e, 0xe2, 0x56, 0xb4, 0x5e, + 0x8d, 0xf1, 0xc4, 0xfe, 0x3a, 0xb2, 0x55, 0x62, 0x1b, 0x4c, 0xf9, 0x97, 0x90, 0xf0, 0xfe, 0xc8, + 0x36, 0x2a, 0x0d, 0xd2, 0x24, 0x36, 0x93, 0x84, 0xac, 0xc3, 0x74, 0x23, 0x78, 0x1b, 0xe5, 0xa5, + 0x5e, 0x1f, 0xeb, 0x49, 0x1a, 0xf5, 0x94, 0x06, 0xcf, 0xb0, 0x34, 0xc3, 0x21, 0xbc, 0xfa, 0x83, + 0x02, 0x6b, 0x69, 0x97, 0x15, 0x1d, 0x0c, 0x58, 0x76, 0x5e, 0x60, 0x91, 0x1d, 0x7f, 0xf9, 0xf9, + 0x59, 0x81, 0xa3, 0x83, 0x76, 0x0a, 0x4e, 0x7f, 0xbe, 0x48, 0x44, 0x5b, 0x40, 0x44, 0xff, 0x9b, + 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x86, 0x99, 0xba, 0x61, 0x57, 0x77, 0xad, 0xaf, 0xc3, 0xfd, 0x36, + 0x22, 0xe0, 0x27, 0x52, 0x8e, 0x23, 0x0b, 0x74, 0x05, 0x96, 0x84, 0xdf, 0x26, 0xb1, 0x6b, 0xac, + 0x2e, 0x7a, 0x25, 0xae, 0x72, 0x2e, 0x7e, 0x0f, 0x6e, 0xf6, 0xe8, 0x71, 0x9f, 0x87, 0xfa, 0xaf, + 0x02, 0xe8, 0x79, 0xde, 0xf9, 0x53, 0x90, 0x37, 0x5c, 0x4b, 0x2c, 0x7b, 0xc1, 0x15, 0xc8, 0xeb, + 0xf3, 0x9d, 0x76, 0x29, 0xbf, 0xbe, 0x73, 0x2d, 0x10, 0xe2, 0x58, 0xcf, 0x8d, 0xc3, 0x27, 0x30, + 0x78, 0xea, 0xa4, 0x71, 0x18, 0x98, 0xe2, 0x58, 0x8f, 0x2e, 0xc3, 0x9c, 0xd9, 0xf0, 0x29, 0x23, + 0xde, 0xae, 0xe9, 0xb8, 0x44, 0x8c, 0x8c, 0x19, 0xfd, 0xa8, 0xac, 0x69, 0x6e, 0xa3, 0x4b, 0x87, + 0x13, 0x96, 0x48, 0x03, 0xe0, 0x84, 0xa7, 0xae, 0xc1, 0xe3, 0xe4, 0x44, 0x9c, 0x05, 0x7e, 0x60, + 0xdb, 0x91, 0x14, 0x77, 0x59, 0xa8, 0xf7, 0xe1, 0xd8, 0x2e, 0xf1, 0x5a, 0x96, 0x49, 0xd6, 0x4d, + 0xd3, 0xf1, 0x6d, 0x16, 0xae, 0xad, 0x65, 0xc8, 0x47, 0x66, 0xf2, 0x4e, 0x1c, 0x91, 0xf1, 0xf3, + 0x11, 0x16, 0x8e, 0x6d, 0xa2, 0x4b, 0x98, 0x19, 0x7e, 0x09, 0x33, 0x30, 0x1d, 0xc3, 0x67, 0xf7, + 0x2d, 0xbb, 0x2a, 0x91, 0x8f, 0x87, 0xd6, 0xd7, 0x2d, 0xbb, 0xfa, 0xac, 0x5d, 0x9a, 0x95, 0x66, + 0xfc, 0x13, 0x0b, 0x43, 0x74, 0x0d, 0xb2, 0x3e, 0x25, 0x9e, 0xbc, 0x5e, 0xa7, 0xd2, 0xc8, 0x7c, + 0x9b, 0x12, 0x2f, 0xdc, 0x7c, 0x66, 0x38, 0x32, 0x17, 0x60, 0x01, 0x81, 0xb6, 0x20, 0x57, 
0xe3, + 0x87, 0x22, 0xa7, 0xfe, 0xe9, 0x34, 0xac, 0xee, 0x75, 0x3e, 0xa0, 0x81, 0x90, 0xe0, 0x00, 0x05, + 0x3d, 0x80, 0x05, 0x9a, 0x68, 0xa1, 0x38, 0xae, 0x31, 0x36, 0x99, 0x81, 0x8d, 0xd7, 0x51, 0xa7, + 0x5d, 0x5a, 0x48, 0xaa, 0x70, 0x4f, 0x00, 0xb5, 0x0c, 0xb3, 0x5d, 0x05, 0xa6, 0xcf, 0x3f, 0xfd, + 0xcc, 0xa3, 0xa7, 0xc5, 0x89, 0xc7, 0x4f, 0x8b, 0x13, 0x4f, 0x9e, 0x16, 0x27, 0xbe, 0xe9, 0x14, + 0x95, 0x47, 0x9d, 0xa2, 0xf2, 0xb8, 0x53, 0x54, 0x9e, 0x74, 0x8a, 0xca, 0x1f, 0x9d, 0xa2, 0xf2, + 0xdd, 0x9f, 0xc5, 0x89, 0x3b, 0xd3, 0x32, 0xb3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3c, 0xd4, + 0x36, 0xaf, 0xfa, 0x13, 0x00, 0x00, +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + 
i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + 
for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += 
strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + 
repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + 
return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex 
+ default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto new file mode 100644 index 
0000000000..9ddfc5465b --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -0,0 +1,434 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. 
+ repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. 
This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". 
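As a concrete reading of the assured-concurrency formula quoted above, the following sketch divides a server concurrency limit of 600 among three Limited priority levels with shares 100, 40, and 30. The function and the level names are hypothetical and not part of the vendored package; only the arithmetic mirrors the comment.

package main

import (
	"fmt"
	"math"
)

// assuredConcurrencyValues applies ACV(l) = ceil(SCL * ACS(l) / sum_k ACS(k))
// from the comment above; names and numbers here are illustrative only.
func assuredConcurrencyValues(scl int, shares map[string]int32) map[string]int {
	var sum int32
	for _, acs := range shares {
		sum += acs
	}
	acv := make(map[string]int, len(shares))
	for name, acs := range shares {
		acv[name] = int(math.Ceil(float64(scl) * float64(acs) / float64(sum)))
	}
	return acv
}

func main() {
	// Hypothetical server concurrency limit of 600 shared by three Limited levels.
	fmt.Println(assuredConcurrencyValues(600, map[string]int32{
		"workload-low":    100, // gets ceil(600*100/170) = 353
		"workload-high":   40,  // gets ceil(600*40/170)  = 142
		"leader-election": 30,  // gets ceil(600*30/170)  = 106
	}))
}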
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. 
Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. 
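The `queues`/`handSize` description above can be made concrete with a small sketch of shuffle sharding: the flow's hash is used to deal a hand of distinct queue indices, and the request then joins the shortest queue in that hand. This is only an illustration under simplified assumptions; the dealer the apiserver actually uses lives in k8s.io/apiserver and differs in detail.

package main

import (
	"fmt"
	"sort"
)

// dealHand derives handSize distinct queue indices in [0, queues) from a
// flow's hash value, as a simplified stand-in for shuffle sharding.
func dealHand(hashValue uint64, queues, handSize int) []int {
	hand := make([]int, 0, handSize)
	for i := 0; i < handSize; i++ {
		remaining := queues - i
		card := int(hashValue % uint64(remaining)) // position among queues not yet dealt
		hashValue /= uint64(remaining)
		sort.Ints(hand)
		// Map that position onto an actual queue index, skipping queues already dealt.
		for _, taken := range hand {
			if card >= taken {
				card++
			}
		}
		hand = append(hand, card)
	}
	return hand
}

func main() {
	// With the documented defaults of 64 queues and a hand size of 8,
	// each flow is confined to its own small subset of queues.
	fmt.Println(dealHand(0xfeedface, 64, 8))
}

Keeping `handSize` much smaller than `queues` is what bounds how many queues a single heavy flow can occupy, which is the guidance given in the field comment.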
+message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // Required + // +unionDiscriminator + optional string kind = 1; + + // +optional + optional UserSubject user = 2; + + // +optional + optional GroupSubject group = 3; + + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/register.go b/vendor/k8s.io/api/flowcontrol/v1beta1/register.go similarity index 66% rename from vendor/knative.dev/pkg/apis/duck/v1alpha1/register.go rename to vendor/k8s.io/api/flowcontrol/v1beta1/register.go index 6ae381400d..e78549314e 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/register.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Knative Authors +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "knative.dev/pkg/apis/duck" ) +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: duck.GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} -// Kind takes an unqualified kind and returns back a Group qualified GroupKind +// Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } @@ -37,20 +39,19 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder installs the api group to a scheme SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes( - SchemeGroupVersion, - &AddressableType{}, - (&AddressableType{}).GetListType(), - &Target{}, - (&Target{}).GetListType(), - &LegacyTarget{}, - (&LegacyTarget{}).GetListType(), + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go new file mode 100644 index 0000000000..ece834e929 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -0,0 +1,529 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. 
+ FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +type Subject struct { + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. 
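The Subject type above is a discriminated union: `kind` names the variant and only the matching pointer field is populated. Below is a sketch of assembling a FlowSchemaSpec for traffic from a single service account using the types in this vendored package; the priority level name, namespace, service account, and API group are made-up example values.

package main

import (
	flowcontrol "k8s.io/api/flowcontrol/v1beta1"
)

// exampleSpec shows the union convention: Kind names the variant and only the
// matching pointer field (here ServiceAccount) is set; User and Group stay nil.
func exampleSpec() flowcontrol.FlowSchemaSpec {
	return flowcontrol.FlowSchemaSpec{
		PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{
			Name: "workload-low", // hypothetical priority level name
		},
		MatchingPrecedence: 1000, // the documented default
		DistinguisherMethod: &flowcontrol.FlowDistinguisherMethod{
			Type: flowcontrol.FlowDistinguisherMethodByNamespaceType,
		},
		Rules: []flowcontrol.PolicyRulesWithSubjects{{
			Subjects: []flowcontrol.Subject{{
				Kind: flowcontrol.SubjectKindServiceAccount,
				ServiceAccount: &flowcontrol.ServiceAccountSubject{
					Namespace: "tekton-pipelines",
					Name:      "tekton-pipelines-controller",
				},
			}},
			ResourceRules: []flowcontrol.ResourcePolicyRule{{
				Verbs:      []string{flowcontrol.VerbAll},
				APIGroups:  []string{"tekton.dev"},
				Resources:  []string{flowcontrol.ResourceAll},
				Namespaces: []string{flowcontrol.NamespaceEvery},
			}},
		}},
	}
}

func main() { _ = exampleSpec() }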
+type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. 
+ // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. 
+type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` +} + +// LimitResponse defines how to handle requests that can not be executed right now. 
+// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. 
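PriorityLevelConfigurationSpec and LimitResponse follow the same union pattern: the `type` discriminator selects which optional field must be set. A sketch of a Limited level that queues excess requests, using only types from this vendored file and the defaults documented in its comments:

package main

import (
	flowcontrol "k8s.io/api/flowcontrol/v1beta1"
)

// exampleLimited builds a Limited priority level: Type selects the Limited arm,
// so Limited must be set; inside it, LimitResponse.Type selects Queue, so
// Queuing must be set with the queue parameters documented above.
func exampleLimited() flowcontrol.PriorityLevelConfigurationSpec {
	return flowcontrol.PriorityLevelConfigurationSpec{
		Type: flowcontrol.PriorityLevelEnablementLimited,
		Limited: &flowcontrol.LimitedPriorityLevelConfiguration{
			AssuredConcurrencyShares: 30, // documented default
			LimitResponse: flowcontrol.LimitResponse{
				Type: flowcontrol.LimitResponseTypeQueue,
				Queuing: &flowcontrol.QueuingConfiguration{
					Queues:           64, // documented default
					HandSize:         8,  // documented default
					QueueLengthLimit: 50, // documented default
				},
			},
		},
	}
}

func main() { _ = exampleLimited() }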
+type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..8343a883f8 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. 
Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. 
if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. 
Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. 
If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "Required", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c8f6e23060 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,541 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. 
+func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..d0f2297181 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,93 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 4e03b54381..49238823b8 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -2723,10 +2723,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2810,10 +2807,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2927,10 +2921,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3079,10 +3070,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3204,10 +3192,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3323,10 +3308,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if err != nil { return err 
} - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3443,10 +3425,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3564,10 +3543,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3684,10 +3660,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3802,10 +3775,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3891,10 +3861,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4009,10 +3976,7 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4199,10 +4163,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4285,10 +4246,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4402,10 +4360,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4521,10 +4476,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4642,10 +4594,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4763,10 +4712,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if err != 
nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4883,10 +4829,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5044,10 +4987,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5166,10 +5106,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5352,10 +5289,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5456,10 +5390,7 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index a98ef94c86..74737098b9 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.networking.v1; diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 6f51df864b..fa921b6199 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -1606,10 +1606,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1693,10 +1690,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1845,10 +1839,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1999,10 +1990,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2118,10 +2106,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2238,10 +2223,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2359,10 +2341,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2479,10 +2458,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2597,10 +2573,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2686,10 +2659,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2876,10 +2846,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2962,10 +2929,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3079,10 +3043,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index a97c318db0..251bbafecf 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.networking.v1beta1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1/doc.go similarity index 82% rename from vendor/k8s.io/api/settings/v1alpha1/doc.go rename to vendor/k8s.io/api/node/v1/doc.go index 60066bb6db..12cbcb8a0e 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/node/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +groupName=settings.k8s.io +// +groupName=node.k8s.io -package v1alpha1 // import "k8s.io/api/settings/v1alpha1" +package v1 // import "k8s.io/api/node/v1" diff --git a/vendor/k8s.io/api/node/v1/generated.pb.go b/vendor/k8s.io/api/node/v1/generated.pb.go new file mode 100644 index 0000000000..d930f63b24 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/generated.pb.go @@ -0,0 +1,1399 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + 
xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1.Overhead.PodFixedEntry") + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto", fileDescriptor_6ac9be560e26ae98) +} + +var fileDescriptor_6ac9be560e26ae98 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xce, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x94, 0xa3, 0x43, 0x14, 0x21, 0x27, 0xca, 0x14, 0x90, + 0x7a, 0x6e, 0x2b, 0x84, 0x2a, 0x18, 0x90, 0x0c, 0xad, 0x40, 0x82, 0x02, 0x2e, 0x2c, 0x88, 0x81, + 0x8b, 0xfd, 0x70, 0xdc, 0xc4, 0xbe, 0xe8, 0x7c, 0x8e, 0xc8, 0x86, 0x58, 0x90, 0x98, 0xfa, 0x5f, + 0x18, 0xf8, 0x0b, 0x15, 0x53, 0xc7, 0x4e, 0x2d, 0x0d, 0xff, 0x82, 0x09, 0xdd, 0xd9, 0x4e, 0x5c, + 0x12, 0x42, 0xd9, 0xee, 0xde, 0x7d, 0xdf, 0xf7, 0xde, 0xfb, 0xde, 0x3d, 0x7c, 0xbf, 0xbb, 0x1d, + 0x51, 0x9f, 0x9b, 0xdd, 0xb8, 0x0d, 0x22, 0x04, 0x09, 0x91, 0x39, 0x80, 0xd0, 0xe5, 0xc2, 0x4c, + 0x1f, 0x58, 0xdf, 0x37, 0x43, 0xee, 0x82, 0x39, 0xd8, 0x34, 0x3d, 0x08, 0x41, 0x30, 0x09, 0x2e, + 0xed, 0x0b, 0x2e, 0x39, 0x21, 0x09, 0x86, 0xb2, 0xbe, 0x4f, 0x15, 0x86, 0x0e, 0x36, 0x6b, 0xeb, + 0x9e, 0x2f, 0x3b, 0x71, 0x9b, 0x3a, 0x3c, 0x30, 0x3d, 0xee, 0x71, 0x53, 0x43, 0xdb, 0xf1, 0x7b, + 0x7d, 0xd3, 0x17, 0x7d, 0x4a, 0x24, 0x6a, 0xcd, 0x5c, 0x1a, 0x87, 0x8b, 0x59, 0x69, 0x6a, 0x77, + 0x26, 0x98, 0x80, 0x39, 0x1d, 0x3f, 0x04, 0x31, 0x34, 0xfb, 0x5d, 0x4f, 0x93, 0x04, 0x44, 0x3c, + 0x16, 0x0e, 0xfc, 0x17, 0x2b, 0x32, 0x03, 0x90, 0x6c, 0x56, 0x2e, 0xf3, 0x6f, 0x2c, 0x11, 0x87, + 0xd2, 0x0f, 0xa6, 0xd3, 0xdc, 0xfd, 0x17, 0x21, 0x72, 0x3a, 0x10, 0xb0, 0x3f, 0x79, 0xcd, 0xef, + 0x45, 0x5c, 0x7a, 0x3e, 0x00, 0xd1, 0x01, 0xe6, 0x92, 0x63, 0x84, 0x4b, 0x7d, 0xee, 0xee, 0xfa, + 0x1f, 0xc0, 0xad, 0xa2, 0xc6, 0x42, 0xab, 0xbc, 0x75, 0x9b, 0x4e, 0x9b, 0x4b, 0x33, 0x02, 0x7d, + 0x91, 0x82, 0x77, 0x42, 0x29, 0x86, 0xd6, 0x67, 0x74, 0x74, 0x5a, 0x2f, 0x8c, 0x4e, 0xeb, 0xa5, + 0x2c, 0xfe, 0xeb, 0xb4, 0x5e, 0x9f, 0x76, 0x96, 0xda, 0xa9, 0x59, 0x4f, 0xfd, 0x48, 0x7e, 0x3a, + 0x9b, 0x0b, 0xd9, 0x63, 0x01, 0x7c, 0x39, 0xab, 0xaf, 0x5f, 0xc6, 0x7b, 0xfa, 0x32, 0x66, 0xa1, + 0xf4, 0xe5, 0xd0, 0x1e, 0x77, 0x51, 0xeb, 0xe2, 0x95, 0x0b, 0x45, 0x92, 0x55, 0xbc, 0xd0, 0x85, + 0x61, 0x15, 0x35, 0x50, 0x6b, 0xd9, 0x56, 0x47, 0xf2, 0x08, 0x2f, 0x0e, 0x58, 0x2f, 0x86, 0x6a, + 0xb1, 0x81, 0x5a, 0xe5, 0x2d, 0x9a, 0xeb, 0x78, 0x9c, 0x8b, 0xf6, 0xbb, 0x9e, 0xb6, 0x60, 0x3a, + 0x57, 0x42, 0xbe, 0x57, 0xdc, 0x46, 0xcd, 0xaf, 0x45, 0x5c, 0xb1, 0x13, 0xbf, 0x1f, 0xf6, 0x58, + 0x14, 0x91, 0x77, 0xb8, 0xa4, 0x26, 0xec, 0x32, 0xc9, 0x74, 0xc6, 0xf2, 0xd6, 0xc6, 0x3c, 0xf5, + 0x88, 0x2a, 0xb4, 0x76, 0xb8, 0x7d, 0x00, 0x8e, 0x7c, 0x06, 0x92, 0x59, 0x24, 0x35, 0x15, 0x4f, + 0x62, 0xf6, 0x58, 0x95, 0xdc, 0xc2, 0x4b, 0x1d, 0x16, 0xba, 0x3d, 0x10, 0xba, 0xfc, 0x65, 0xeb, + 0x5a, 0x0a, 0x5f, 0x7a, 0x9c, 0x84, 0xed, 0xec, 0x9d, 0xec, 0xe2, 0x12, 0x4f, 0x07, 0x57, 0x5d, + 0xd0, 0xc5, 0xdc, 0x9c, 
0x37, 0x5c, 0xab, 0xa2, 0x26, 0x99, 0xdd, 0xec, 0x31, 0x97, 0xec, 0x61, + 0xac, 0x3e, 0x93, 0x1b, 0xf7, 0xfc, 0xd0, 0xab, 0x5e, 0xd1, 0x4a, 0xc6, 0x2c, 0xa5, 0xfd, 0x31, + 0xca, 0xba, 0xaa, 0x1a, 0x98, 0xdc, 0xed, 0x9c, 0x42, 0xf3, 0x1b, 0xc2, 0xab, 0x79, 0xd7, 0xd4, + 0xaf, 0x20, 0x6f, 0xa7, 0x9c, 0xa3, 0x97, 0x73, 0x4e, 0xb1, 0xb5, 0x6f, 0xab, 0xd9, 0x67, 0xcc, + 0x22, 0x39, 0xd7, 0x76, 0xf0, 0xa2, 0x2f, 0x21, 0x88, 0xaa, 0x45, 0xfd, 0xc9, 0x1b, 0xb3, 0xaa, + 0xcf, 0x97, 0x64, 0xad, 0xa4, 0x62, 0x8b, 0x4f, 0x14, 0xcd, 0x4e, 0xd8, 0xcd, 0xc3, 0x22, 0xce, + 0x35, 0x45, 0x0e, 0x70, 0x45, 0x91, 0xf7, 0xa1, 0x07, 0x8e, 0xe4, 0x22, 0xdd, 0xa0, 0x8d, 0xf9, + 0xd6, 0xd0, 0xbd, 0x1c, 0x25, 0xd9, 0xa3, 0xb5, 0x34, 0x59, 0x25, 0xff, 0x64, 0x5f, 0xd0, 0x26, + 0xaf, 0x71, 0x59, 0xf2, 0x9e, 0x5a, 0x65, 0x9f, 0x87, 0x59, 0x1f, 0x17, 0xa6, 0xa0, 0x36, 0x49, + 0xa5, 0x7a, 0x35, 0x86, 0x59, 0x37, 0x52, 0xe1, 0xf2, 0x24, 0x16, 0xd9, 0x79, 0x9d, 0xda, 0x03, + 0x7c, 0x7d, 0xaa, 0x9e, 0x19, 0x2b, 0xb3, 0x96, 0x5f, 0x99, 0xe5, 0xdc, 0x0a, 0x58, 0xad, 0xa3, + 0x73, 0xa3, 0x70, 0x7c, 0x6e, 0x14, 0x4e, 0xce, 0x8d, 0xc2, 0xc7, 0x91, 0x81, 0x8e, 0x46, 0x06, + 0x3a, 0x1e, 0x19, 0xe8, 0x64, 0x64, 0xa0, 0x1f, 0x23, 0x03, 0x1d, 0xfe, 0x34, 0x0a, 0x6f, 0x8a, + 0x83, 0xcd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x40, 0xe0, 0x08, 0xf3, 0x05, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, 
i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, 
len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto new file mode 100644 index 0000000000..4a86999f14 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -0,0 +1,109 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.node.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1; +} + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://kubernetes.io/docs/concepts/containers/runtime-class/ +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. + optional string handler = 2; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ + // This field is in beta starting v1.18 + // and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 3; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 4; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. 
+ repeated RuntimeClass items = 2; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map<string, string> nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1/register.go b/vendor/k8s.io/api/node/v1/register.go new file mode 100644 index 0000000000..d514e9b5a5 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go new file mode 100644 index 0000000000..b32cc36c49 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/types.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://kubernetes.io/docs/concepts/containers/runtime-class/ +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ + // This field is in beta starting v1.18 + // and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. 
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..c68c40e90f --- /dev/null +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/\nThis field is in beta starting v1.18 and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go similarity index 54% rename from vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go index ed6c31a32c..35084da7e3 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go @@ -18,34 +18,66 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodPreset) DeepCopyInto(out *PodPreset) { +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset. -func (in *PodPreset) DeepCopy() *PodPreset { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { if in == nil { return nil } - out := new(PodPreset) + out := new(RuntimeClass) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPreset) DeepCopyObject() runtime.Object { +func (in *RuntimeClass) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -53,13 +85,13 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) + *out = make([]RuntimeClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -67,18 +99,18 @@ func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList. -func (in *PodPresetList) DeepCopy() *PodPresetList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { if in == nil { return nil } - out := new(PodPresetList) + out := new(RuntimeClassList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPresetList) DeepCopyObject() runtime.Object { +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -86,33 +118,18 @@ func (in *PodPresetList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { +func (in *Scheduling) DeepCopyInto(out *Scheduling) { *out = *in - in.Selector.DeepCopyInto(&out.Selector) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -120,12 +137,12 @@ func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec. -func (in *PodPresetSpec) DeepCopy() *PodPresetSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { if in == nil { return nil } - out := new(PodPresetSpec) + out := new(Scheduling) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index e6658a96fb..abd2c09b6b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -852,7 +852,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -869,10 +869,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -988,10 +985,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1108,10 +1102,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1265,10 +1256,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1428,7 +1416,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1479,10 
+1467,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index ac05f839eb..310ad490bf 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.node.v1alpha1; @@ -78,8 +78,8 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. + // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // requirements, and is immutable. optional string runtimeHandler = 1; // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index b59767107c..03e7e6f333 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -56,8 +56,8 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. + // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 3900001723..d3011466bb 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -58,7 +58,7 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index b85cbd295e..4bfdd5df30 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -767,7 +767,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -784,10 +784,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -974,10 +971,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1094,10 +1088,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1257,7 +1248,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1308,10 +1299,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 49166798da..5c2d9d50af 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.node.v1beta1; @@ -57,8 +57,8 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. optional string handler = 2; // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index 1d2b96312e..89559949ca 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -48,8 +48,8 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 681f73f23c..a486147f03 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 40ec7ef7fb..e91b497cb0 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -2540,10 +2540,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2625,10 +2622,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2730,10 +2724,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2852,10 +2843,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2971,10 +2959,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3062,10 +3047,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3153,10 +3135,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3305,10 +3284,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3425,10 +3401,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3586,10 +3559,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3770,7 +3740,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3863,10 +3833,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3982,10 +3949,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4102,10 +4066,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4861,10 +4822,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4980,10 +4938,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5099,10 +5054,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5217,10 +5169,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5338,10 +5287,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5457,10 +5403,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 30db726f9c..18a1c65786 100644 --- 
a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.policy.v1beta1; diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index ba6872d624..678c00512e 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -1557,10 +1557,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1713,10 +1710,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1866,10 +1860,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1986,10 +1977,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2106,10 +2094,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2319,10 +2304,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2439,10 +2421,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2592,10 +2571,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2712,10 +2688,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2832,10 +2805,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2981,10 +2951,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error 
{ if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3162,10 +3129,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 71fa08341c..22c6dae4b0 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 3b12526da9..94c1bef8bb 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -1558,10 +1558,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1714,10 +1711,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1867,10 +1861,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1987,10 +1978,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2107,10 +2095,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2320,10 +2305,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2440,10 +2422,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2593,10 +2572,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ 
-2713,10 +2689,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2833,10 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2982,10 +2952,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3163,10 +3130,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 60354e6a6f..caed7ec306 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1alpha1; diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 53d36320e4..ad5d7cb05f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -1557,10 +1557,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1713,10 +1710,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1866,10 +1860,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1986,10 +1977,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2106,10 +2094,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2319,10 +2304,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if 
(skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2439,10 +2421,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2592,10 +2571,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2712,10 +2688,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2832,10 +2805,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2981,10 +2951,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3162,10 +3129,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 44cd6c24aa..17d3741f0c 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1beta1; diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index efc3102efe..c5ef2f50ec 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -511,10 +511,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -631,10 +628,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index e7489f5392..1f9a7474a0 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 8a62104dbe..16f3c7cb4c 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -511,10 +511,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -631,10 +628,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 8c4a2c4609..da27a13e75 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1alpha1; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index b89af56b3b..64b1c15057 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -511,10 +511,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -631,10 +628,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index eae3c01f37..99bdaabee1 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1beta1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go deleted file mode 100644 index 7ed066d31c..0000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1053 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{0} -} -func (m *PodPreset) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPreset) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPreset.Merge(m, src) -} -func (m *PodPreset) XXX_Size() int { - return m.Size() -} -func (m *PodPreset) XXX_DiscardUnknown() { - xxx_messageInfo_PodPreset.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPreset proto.InternalMessageInfo - -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{1} -} -func (m *PodPresetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetList.Merge(m, src) -} -func (m *PodPresetList) XXX_Size() int { - return m.Size() -} -func (m *PodPresetList) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetList proto.InternalMessageInfo - -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{2} -} -func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetSpec.Merge(m, src) -} -func (m *PodPresetSpec) XXX_Size() int { - return m.Size() -} -func (m *PodPresetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo - -func init() { - proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") - proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") - 
proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) -} - -var fileDescriptor_48fab0a6ea4b79ce = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 
0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} - -func (m *PodPreset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodPresetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.EnvFrom) > 0 { - for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - 
i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PodPreset) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodPresetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodPresetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PodPreset) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodPreset{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnv += "}" - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnvFrom += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts += 
fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeMounts += "}" - s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PodPreset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodPreset{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, v11.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, v11.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto deleted file mode 100644 index db1ec9312b..0000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.settings.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -message PodPreset { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // +optional - optional PodPresetSpec spec = 2; -} - -// PodPresetList is a list of PodPreset objects. -message PodPresetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated PodPreset items = 2; -} - -// PodPresetSpec is a description of a pod preset. -message PodPresetSpec { - // Selector is a label query over a set of resources, in this case pods. - // Required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // Env defines the collection of EnvVar to inject into containers. 
- // +optional - repeated k8s.io.api.core.v1.EnvVar env = 2; - - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3; - - // Volumes defines the collection of Volume to inject into the pod. - // +optional - repeated k8s.io.api.core.v1.Volume volumes = 4; - - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; -} - diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go deleted file mode 100644 index 8cc99d440d..0000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -type PodPreset struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // +optional - Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodPresetSpec is a description of a pod preset. -type PodPresetSpec struct { - // Selector is a label query over a set of resources, in this case pods. - // Required. - Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - - // Env defines the collection of EnvVar to inject into containers. - // +optional - Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` - // Volumes defines the collection of Volume to inject into the pod. - // +optional - Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPresetList is a list of PodPreset objects. -type PodPresetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. 
- Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 0501e0af35..0000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PodPreset = map[string]string{ - "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", -} - -func (PodPreset) SwaggerDoc() map[string]string { - return map_PodPreset -} - -var map_PodPresetList = map[string]string{ - "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (PodPresetList) SwaggerDoc() map[string]string { - return map_PodPresetList -} - -var map_PodPresetSpec = map[string]string{ - "": "PodPresetSpec is a description of a pod preset.", - "selector": "Selector is a label query over a set of resources, in this case pods. 
Required.", - "env": "Env defines the collection of EnvVar to inject into containers.", - "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", - "volumes": "Volumes defines the collection of Volume to inject into the pod.", - "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", -} - -func (PodPresetSpec) SwaggerDoc() map[string]string { - return map_PodPresetSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 2c7088c389..34a3c34dc2 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -298,10 +298,38 @@ func (m *StorageClassList) XXX_DiscardUnknown() { var xxx_messageInfo_StorageClassList proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{9} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} + return fileDescriptor_3b530c1983504d8d, []int{10} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +357,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} + return fileDescriptor_3b530c1983504d8d, []int{11} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} + return fileDescriptor_3b530c1983504d8d, []int{12} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} + return fileDescriptor_3b530c1983504d8d, []int{13} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = 
VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} + return fileDescriptor_3b530c1983504d8d, []int{14} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +469,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} + return fileDescriptor_3b530c1983504d8d, []int{15} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +497,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} + return fileDescriptor_3b530c1983504d8d, []int{16} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -505,6 +533,7 @@ func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1.TokenRequest") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") @@ -520,95 +549,101 @@ func init() { } var fileDescriptor_3b530c1983504d8d = []byte{ - // 1395 bytes of a gzipped FileDescriptorProto + // 1500 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x76, 0x7e, 0x8c, 0x93, 0xc6, 0x99, 0xe4, 0xfb, 0xfd, 0xfa, 0x9b, 0x83, 0x37, - 0x5a, 0x2a, 0x08, 0x85, 0xae, 0x9b, 0x52, 0xaa, 0xaa, 0x52, 0x91, 0xe2, 0xc4, 0xa5, 0x11, 0x71, - 0x12, 0x8d, 0x4b, 0x85, 0x10, 0x20, 0x26, 0xbb, 0x13, 0x67, 0x1b, 0xef, 0xce, 0x76, 0x77, 0x6c, - 0xf0, 0x8d, 0x13, 0x37, 0x24, 0xb8, 0xf2, 0x57, 0x80, 0x04, 0x17, 0x8e, 0x9c, 0xca, 0xad, 0xe2, - 0xd4, 0xd3, 0x8a, 0x2e, 0x67, 0xb8, 0x71, 0xc9, 0x09, 0xcd, 0xec, 0xd8, 0xfb, 0xc3, 0xeb, 0x34, - 0xbd, 0xe4, 0xe6, 0x79, 0xef, 0x7d, 0x3e, 0xef, 0xbd, 0x79, 0x3f, 0x66, 0x0d, 0xde, 0x3b, 0xbd, - 0xe3, 0xeb, 0x16, 0xad, 0x9f, 0xf6, 0x8e, 0x88, 0xe7, 0x10, 0x46, 0xfc, 0x7a, 0x9f, 0x38, 0x26, - 0xf5, 0xea, 0x52, 0x81, 0x5d, 0xab, 0xee, 0x33, 0xea, 0xe1, 0x0e, 0xa9, 0xf7, 0x37, 0xeb, 0x1d, - 0xe2, 0x10, 0x0f, 0x33, 0x62, 0xea, 0xae, 0x47, 0x19, 0x85, 0xff, 0x89, 0xcc, 0x74, 0xec, 0x5a, - 0xba, 0x34, 0xd3, 0xfb, 0x9b, 0x6b, 0xd7, 0x3b, 0x16, 0x3b, 0xe9, 0x1d, 0xe9, 0x06, 0xb5, 0xeb, - 0x1d, 0xda, 0xa1, 0x75, 0x61, 0x7d, 0xd4, 0x3b, 0x16, 0x27, 0x71, 0x10, 0xbf, 0x22, 0x96, 0x35, - 0x2d, 0xe1, 0xcc, 0xa0, 0x5e, 0x9e, 0xa7, 0xb5, 0x5b, 0xb1, 0x8d, 0x8d, 0x8d, 0x13, 0xcb, 0x21, - 0xde, 0xa0, 0xee, 0x9e, 0x76, 0xb8, 0xc0, 0xaf, 0xdb, 0x84, 0xe1, 0x3c, 0x54, 0x7d, 0x12, 0xca, - 0xeb, 0x39, 0xcc, 0xb2, 0xc9, 0x18, 0xe0, 0xf6, 0xcb, 0x00, 0xbe, 0x71, 0x42, 0x6c, 0x9c, 0xc5, - 0x69, 0x3f, 0x2b, 0x60, 0x7e, 0xbb, 0xbd, 0xbb, 
0xe3, 0x59, 0x7d, 0xe2, 0xc1, 0xcf, 0xc1, 0x1c, - 0x8f, 0xc8, 0xc4, 0x0c, 0x57, 0x95, 0x75, 0x65, 0xa3, 0x7c, 0xf3, 0x86, 0x1e, 0xdf, 0xd4, 0x88, - 0x58, 0x77, 0x4f, 0x3b, 0x5c, 0xe0, 0xeb, 0xdc, 0x5a, 0xef, 0x6f, 0xea, 0x07, 0x47, 0x8f, 0x89, - 0xc1, 0x5a, 0x84, 0xe1, 0x06, 0x7c, 0x1a, 0xa8, 0x53, 0x61, 0xa0, 0x82, 0x58, 0x86, 0x46, 0xac, - 0xf0, 0x3e, 0x28, 0xfa, 0x2e, 0x31, 0xaa, 0xd3, 0x82, 0xfd, 0xaa, 0x9e, 0x5b, 0x07, 0x7d, 0x14, - 0x51, 0xdb, 0x25, 0x46, 0x63, 0x41, 0x32, 0x16, 0xf9, 0x09, 0x09, 0xbc, 0xf6, 0x93, 0x02, 0x16, - 0x47, 0x56, 0x7b, 0x96, 0xcf, 0xe0, 0x27, 0x63, 0xb1, 0xeb, 0x17, 0x8b, 0x9d, 0xa3, 0x45, 0xe4, - 0x15, 0xe9, 0x67, 0x6e, 0x28, 0x49, 0xc4, 0xdd, 0x04, 0x25, 0x8b, 0x11, 0xdb, 0xaf, 0x4e, 0xaf, - 0x17, 0x36, 0xca, 0x37, 0xd7, 0x5f, 0x16, 0x78, 0x63, 0x51, 0x92, 0x95, 0x76, 0x39, 0x0c, 0x45, - 0x68, 0xed, 0x9f, 0xe9, 0x44, 0xd8, 0x3c, 0x1d, 0x78, 0x17, 0x5c, 0xc1, 0x8c, 0x61, 0xe3, 0x04, - 0x91, 0x27, 0x3d, 0xcb, 0x23, 0xa6, 0x08, 0x7e, 0xae, 0x01, 0xc3, 0x40, 0xbd, 0xb2, 0x95, 0xd2, - 0xa0, 0x8c, 0x25, 0xc7, 0xba, 0xd4, 0xdc, 0x75, 0x8e, 0xe9, 0x81, 0xd3, 0xa2, 0x3d, 0x87, 0x89, - 0x6b, 0x95, 0xd8, 0xc3, 0x94, 0x06, 0x65, 0x2c, 0xa1, 0x01, 0x56, 0xfb, 0xb4, 0xdb, 0xb3, 0xc9, - 0x9e, 0x75, 0x4c, 0x8c, 0x81, 0xd1, 0x25, 0x2d, 0x6a, 0x12, 0xbf, 0x5a, 0x58, 0x2f, 0x6c, 0xcc, - 0x37, 0xea, 0x61, 0xa0, 0xae, 0x3e, 0xca, 0xd1, 0x9f, 0x05, 0xea, 0x4a, 0x8e, 0x1c, 0xe5, 0x92, - 0xc1, 0x7b, 0x60, 0x49, 0x5e, 0xce, 0x36, 0x76, 0xb1, 0x61, 0xb1, 0x41, 0xb5, 0x28, 0x22, 0x5c, - 0x09, 0x03, 0x75, 0xa9, 0x9d, 0x56, 0xa1, 0xac, 0x2d, 0x7c, 0x00, 0x16, 0x8f, 0xfd, 0xf7, 0x3d, - 0xda, 0x73, 0x0f, 0x69, 0xd7, 0x32, 0x06, 0xd5, 0xd2, 0xba, 0xb2, 0x31, 0xdf, 0xd0, 0xc2, 0x40, - 0x5d, 0xbc, 0xdf, 0x4e, 0x28, 0xce, 0xb2, 0x02, 0x94, 0x06, 0x6a, 0x3f, 0x2a, 0x60, 0x76, 0xbb, - 0xbd, 0xbb, 0x4f, 0x4d, 0x72, 0x09, 0x4d, 0xbe, 0x93, 0x6a, 0x72, 0x6d, 0x72, 0xaf, 0xf0, 0x78, - 0x26, 0xb6, 0xf8, 0xdf, 0x51, 0x8b, 0x73, 0x1b, 0x39, 0x9e, 0xeb, 0xa0, 0xe8, 0x60, 0x9b, 0x88, - 0xa8, 0xe7, 0x63, 0xcc, 0x3e, 0xb6, 0x09, 0x12, 0x1a, 0xf8, 0x3a, 0x98, 0x71, 0xa8, 0x49, 0x76, - 0x77, 0x84, 0xef, 0xf9, 0xc6, 0x15, 0x69, 0x33, 0xb3, 0x2f, 0xa4, 0x48, 0x6a, 0xe1, 0x2d, 0xb0, - 0xc0, 0xa8, 0x4b, 0xbb, 0xb4, 0x33, 0xf8, 0x80, 0x0c, 0x86, 0x55, 0xaf, 0x84, 0x81, 0xba, 0xf0, - 0x30, 0x21, 0x47, 0x29, 0x2b, 0xf8, 0x29, 0x28, 0xe3, 0x6e, 0x97, 0x1a, 0x98, 0xe1, 0xa3, 0x2e, - 0x11, 0xa5, 0x2c, 0xdf, 0xbc, 0x36, 0x21, 0xbd, 0xa8, 0x4b, 0xb8, 0x5f, 0x44, 0x7c, 0xda, 0xf3, - 0x0c, 0xe2, 0x37, 0x96, 0xc2, 0x40, 0x2d, 0x6f, 0xc5, 0x14, 0x28, 0xc9, 0xa7, 0xfd, 0xa0, 0x80, - 0xb2, 0x4c, 0xf8, 0x12, 0x26, 0x7a, 0x3b, 0x3d, 0xd1, 0xb5, 0xf3, 0xab, 0x34, 0x61, 0x9e, 0x3f, - 0x1b, 0x45, 0x2c, 0x86, 0xf9, 0x00, 0xcc, 0x9a, 0xa2, 0x54, 0x7e, 0x55, 0x11, 0xac, 0x57, 0xcf, - 0x67, 0x95, 0xbb, 0x62, 0x49, 0x72, 0xcf, 0x46, 0x67, 0x1f, 0x0d, 0x59, 0xb4, 0x6f, 0x66, 0xc0, - 0xc2, 0x70, 0x4c, 0xba, 0xd8, 0xf7, 0x2f, 0xa1, 0x79, 0xdf, 0x05, 0x65, 0xd7, 0xa3, 0x7d, 0xcb, - 0xb7, 0xa8, 0x43, 0x3c, 0xd9, 0x47, 0x2b, 0x12, 0x52, 0x3e, 0x8c, 0x55, 0x28, 0x69, 0x07, 0x3b, - 0x00, 0xb8, 0xd8, 0xc3, 0x36, 0x61, 0x3c, 0xfb, 0x82, 0xc8, 0xfe, 0x9d, 0x09, 0xd9, 0x27, 0x33, - 0xd2, 0x0f, 0x47, 0xa8, 0xa6, 0xc3, 0xbc, 0x41, 0x1c, 0x5d, 0xac, 0x40, 0x09, 0x6a, 0x78, 0x0a, - 0x16, 0x3d, 0x62, 0x74, 0xb1, 0x65, 0xcb, 0xa5, 0x50, 0x14, 0x11, 0x36, 0xf9, 0x52, 0x40, 0x49, - 0xc5, 0x59, 0xa0, 0xde, 0x18, 0x7f, 0xa0, 0xf5, 0x43, 0xe2, 0xf9, 0x96, 0xcf, 0x88, 0xc3, 0xa2, - 0x0e, 0x4d, 0x61, 0x50, 0x9a, 0x9b, 0xcf, 0x89, 0xcd, 0xd7, 0xe5, 0x81, 
0xcb, 0x2c, 0xea, 0xf8, - 0xd5, 0x52, 0x3c, 0x27, 0xad, 0x84, 0x1c, 0xa5, 0xac, 0xe0, 0x1e, 0x58, 0xe5, 0x7d, 0xfd, 0x45, - 0xe4, 0xa0, 0xf9, 0xa5, 0x8b, 0x1d, 0x7e, 0x4b, 0xd5, 0x19, 0xb1, 0xfb, 0xaa, 0x7c, 0xb7, 0x6e, - 0xe5, 0xe8, 0x51, 0x2e, 0x0a, 0x7e, 0x04, 0x96, 0xa3, 0xe5, 0xda, 0xb0, 0x1c, 0xd3, 0x72, 0x3a, - 0x7c, 0xb5, 0x56, 0x67, 0x45, 0xd2, 0xd7, 0xc2, 0x40, 0x5d, 0x7e, 0x94, 0x55, 0x9e, 0xe5, 0x09, - 0xd1, 0x38, 0x09, 0x7c, 0x02, 0x96, 0x85, 0x47, 0x62, 0xca, 0xa1, 0xb7, 0x88, 0x5f, 0x9d, 0x13, - 0xa5, 0xdb, 0x48, 0x96, 0x8e, 0x5f, 0x1d, 0xaf, 0xdb, 0x70, 0x35, 0xb4, 0x49, 0x97, 0x18, 0x8c, - 0x7a, 0x0f, 0x89, 0x67, 0x37, 0xfe, 0x2f, 0xeb, 0xb5, 0xbc, 0x95, 0xa5, 0x42, 0xe3, 0xec, 0x6b, - 0xf7, 0xc0, 0x52, 0xa6, 0xe0, 0xb0, 0x02, 0x0a, 0xa7, 0x64, 0x10, 0x2d, 0x35, 0xc4, 0x7f, 0xc2, - 0x55, 0x50, 0xea, 0xe3, 0x6e, 0x8f, 0x44, 0xcd, 0x87, 0xa2, 0xc3, 0xdd, 0xe9, 0x3b, 0x8a, 0xf6, - 0x8b, 0x02, 0x2a, 0xc9, 0xee, 0xb9, 0x84, 0x3d, 0xf1, 0x20, 0xbd, 0x27, 0x5e, 0xbb, 0x40, 0x4f, - 0x4f, 0x58, 0x16, 0xdf, 0x4f, 0x83, 0x4a, 0x54, 0x97, 0xe8, 0x5d, 0xb7, 0x89, 0xc3, 0x2e, 0x61, - 0xa0, 0x5b, 0xa9, 0xd7, 0xe8, 0xad, 0x73, 0xd7, 0x75, 0x1c, 0xd8, 0xa4, 0x67, 0x09, 0x7e, 0x08, - 0x66, 0x7c, 0x86, 0x59, 0x8f, 0x0f, 0x39, 0x27, 0xbc, 0x7e, 0x51, 0x42, 0x01, 0x8a, 0x5f, 0xa4, - 0xe8, 0x8c, 0x24, 0x99, 0xf6, 0xab, 0x02, 0x56, 0xb3, 0x90, 0x4b, 0xa8, 0xee, 0x5e, 0xba, 0xba, - 0x6f, 0x5c, 0x30, 0x99, 0x09, 0x15, 0xfe, 0x5d, 0x01, 0xff, 0x1d, 0xcb, 0x5b, 0xbc, 0x7d, 0x7c, - 0x27, 0xb8, 0x99, 0xcd, 0xb3, 0x1f, 0xbf, 0xe5, 0x62, 0x27, 0x1c, 0xe6, 0xe8, 0x51, 0x2e, 0x0a, - 0x3e, 0x06, 0x15, 0xcb, 0xe9, 0x5a, 0x0e, 0x89, 0x64, 0xed, 0xb8, 0xbe, 0xb9, 0x83, 0x9b, 0x65, - 0x16, 0xc5, 0x5d, 0x0d, 0x03, 0xb5, 0xb2, 0x9b, 0x61, 0x41, 0x63, 0xbc, 0xda, 0x6f, 0x39, 0x95, - 0x11, 0xaf, 0xdd, 0xdb, 0x60, 0x2e, 0xfa, 0x20, 0x25, 0x9e, 0x4c, 0x63, 0x74, 0xd3, 0x5b, 0x52, - 0x8e, 0x46, 0x16, 0xa2, 0x6f, 0xc4, 0x55, 0xc8, 0x40, 0x2f, 0xdc, 0x37, 0x02, 0x94, 0xe8, 0x1b, - 0x71, 0x46, 0x92, 0x8c, 0x07, 0xc1, 0xbf, 0x69, 0xc4, 0x5d, 0x16, 0xd2, 0x41, 0xec, 0x4b, 0x39, - 0x1a, 0x59, 0x68, 0x7f, 0x15, 0x72, 0x0a, 0x24, 0x1a, 0x30, 0x91, 0xcd, 0xf0, 0x13, 0x3c, 0x9b, - 0x8d, 0x39, 0xca, 0xc6, 0x84, 0xdf, 0x29, 0x00, 0xe2, 0x11, 0x45, 0x6b, 0xd8, 0xa0, 0x51, 0x17, - 0x35, 0x5f, 0x69, 0x24, 0xf4, 0xad, 0x31, 0x9e, 0xe8, 0x25, 0x5c, 0x93, 0xfe, 0xe1, 0xb8, 0x01, - 0xca, 0x71, 0x0e, 0x4d, 0x50, 0x8e, 0xa4, 0x4d, 0xcf, 0xa3, 0x9e, 0x1c, 0x4f, 0xed, 0xdc, 0x58, - 0x84, 0x65, 0xa3, 0x26, 0x3e, 0xcb, 0x62, 0xe8, 0x59, 0xa0, 0x96, 0x13, 0x7a, 0x94, 0xa4, 0xe5, - 0x5e, 0x4c, 0x12, 0x7b, 0x29, 0xbe, 0x9a, 0x97, 0x1d, 0x32, 0xd9, 0x4b, 0x82, 0x76, 0xad, 0x09, - 0xfe, 0x37, 0xe1, 0x5a, 0x5e, 0xe9, 0xbd, 0xf8, 0x5a, 0x01, 0x49, 0x1f, 0x70, 0x0f, 0x14, 0xf9, - 0xbf, 0x61, 0xb9, 0x48, 0xae, 0x5d, 0x6c, 0x91, 0x3c, 0xb4, 0x6c, 0x12, 0xaf, 0x42, 0x7e, 0x42, - 0x82, 0x05, 0xbe, 0x09, 0x66, 0x6d, 0xe2, 0xfb, 0xb8, 0x23, 0x3d, 0xc7, 0x1f, 0x72, 0xad, 0x48, - 0x8c, 0x86, 0x7a, 0xed, 0x36, 0x58, 0xc9, 0xf9, 0x20, 0x86, 0x2a, 0x28, 0x19, 0xe2, 0x8f, 0x1b, - 0x0f, 0xa8, 0xd4, 0x98, 0xe7, 0x1b, 0x65, 0x5b, 0xfc, 0x5f, 0x8b, 0xe4, 0x8d, 0x8d, 0xa7, 0x2f, - 0x6a, 0x53, 0xcf, 0x5e, 0xd4, 0xa6, 0x9e, 0xbf, 0xa8, 0x4d, 0x7d, 0x15, 0xd6, 0x94, 0xa7, 0x61, - 0x4d, 0x79, 0x16, 0xd6, 0x94, 0xe7, 0x61, 0x4d, 0xf9, 0x23, 0xac, 0x29, 0xdf, 0xfe, 0x59, 0x9b, - 0xfa, 0x78, 0xba, 0xbf, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x83, 0x24, 0x44, 0x13, - 0x11, 0x00, 0x00, + 0x17, 0xcf, 0xc6, 0xce, 0xaf, 0x71, 0xd2, 0x24, 0x93, 0xf4, 0xfb, 0x35, 0x39, 
0xd8, 0xd1, 0xb6, + 0x82, 0x50, 0xe8, 0xba, 0x29, 0xa5, 0xaa, 0x2a, 0x15, 0x29, 0x9b, 0xb8, 0x34, 0x22, 0xbf, 0x34, + 0x0e, 0x15, 0x42, 0x80, 0x3a, 0xd9, 0x9d, 0x38, 0x5b, 0x7b, 0x77, 0xb6, 0x3b, 0x63, 0x53, 0xdf, + 0xe0, 0xc2, 0x0d, 0x09, 0xae, 0x88, 0x3f, 0x02, 0x24, 0xb8, 0x70, 0xe4, 0x54, 0x6e, 0x15, 0xa7, + 0x9e, 0x2c, 0x6a, 0xce, 0xf0, 0x07, 0xe4, 0x84, 0x66, 0x76, 0xec, 0xfd, 0xe1, 0x75, 0x9a, 0x5e, + 0x72, 0xf3, 0xbe, 0x1f, 0x9f, 0xf7, 0x66, 0xde, 0x7b, 0x9f, 0x37, 0x06, 0x1f, 0x34, 0xee, 0x30, + 0xc3, 0xa1, 0x95, 0x46, 0xeb, 0x88, 0x04, 0x1e, 0xe1, 0x84, 0x55, 0xda, 0xc4, 0xb3, 0x69, 0x50, + 0x51, 0x0a, 0xec, 0x3b, 0x15, 0xc6, 0x69, 0x80, 0xeb, 0xa4, 0xd2, 0x5e, 0xaf, 0xd4, 0x89, 0x47, + 0x02, 0xcc, 0x89, 0x6d, 0xf8, 0x01, 0xe5, 0x14, 0x5e, 0x0e, 0xcd, 0x0c, 0xec, 0x3b, 0x86, 0x32, + 0x33, 0xda, 0xeb, 0x2b, 0xd7, 0xeb, 0x0e, 0x3f, 0x69, 0x1d, 0x19, 0x16, 0x75, 0x2b, 0x75, 0x5a, + 0xa7, 0x15, 0x69, 0x7d, 0xd4, 0x3a, 0x96, 0x5f, 0xf2, 0x43, 0xfe, 0x0a, 0x51, 0x56, 0xf4, 0x58, + 0x30, 0x8b, 0x06, 0x59, 0x91, 0x56, 0x6e, 0x45, 0x36, 0x2e, 0xb6, 0x4e, 0x1c, 0x8f, 0x04, 0x9d, + 0x8a, 0xdf, 0xa8, 0x0b, 0x01, 0xab, 0xb8, 0x84, 0xe3, 0x2c, 0xaf, 0xca, 0x28, 0xaf, 0xa0, 0xe5, + 0x71, 0xc7, 0x25, 0x43, 0x0e, 0xb7, 0x5f, 0xe5, 0xc0, 0xac, 0x13, 0xe2, 0xe2, 0xb4, 0x9f, 0xfe, + 0xab, 0x06, 0x66, 0x36, 0x6b, 0xdb, 0x5b, 0x81, 0xd3, 0x26, 0x01, 0x7c, 0x04, 0xa6, 0x45, 0x46, + 0x36, 0xe6, 0xb8, 0xa8, 0xad, 0x6a, 0x6b, 0x85, 0x9b, 0x37, 0x8c, 0xe8, 0xa6, 0x06, 0xc0, 0x86, + 0xdf, 0xa8, 0x0b, 0x01, 0x33, 0x84, 0xb5, 0xd1, 0x5e, 0x37, 0xf6, 0x8f, 0x1e, 0x13, 0x8b, 0xef, + 0x12, 0x8e, 0x4d, 0xf8, 0xac, 0x5b, 0x1e, 0xeb, 0x75, 0xcb, 0x20, 0x92, 0xa1, 0x01, 0x2a, 0xbc, + 0x0f, 0xf2, 0xcc, 0x27, 0x56, 0x71, 0x5c, 0xa2, 0x5f, 0x35, 0x32, 0xeb, 0x60, 0x0c, 0x32, 0xaa, + 0xf9, 0xc4, 0x32, 0x67, 0x15, 0x62, 0x5e, 0x7c, 0x21, 0xe9, 0xaf, 0xff, 0xa2, 0x81, 0xb9, 0x81, + 0xd5, 0x8e, 0xc3, 0x38, 0xfc, 0x6c, 0x28, 0x77, 0xe3, 0x7c, 0xb9, 0x0b, 0x6f, 0x99, 0xf9, 0x82, + 0x8a, 0x33, 0xdd, 0x97, 0xc4, 0xf2, 0xae, 0x82, 0x09, 0x87, 0x13, 0x97, 0x15, 0xc7, 0x57, 0x73, + 0x6b, 0x85, 0x9b, 0xab, 0xaf, 0x4a, 0xdc, 0x9c, 0x53, 0x60, 0x13, 0xdb, 0xc2, 0x0d, 0x85, 0xde, + 0xfa, 0x8f, 0xf9, 0x58, 0xda, 0xe2, 0x38, 0xf0, 0x2e, 0xb8, 0x84, 0x39, 0xc7, 0xd6, 0x09, 0x22, + 0x4f, 0x5a, 0x4e, 0x40, 0x6c, 0x99, 0xfc, 0xb4, 0x09, 0x7b, 0xdd, 0xf2, 0xa5, 0x8d, 0x84, 0x06, + 0xa5, 0x2c, 0x85, 0xaf, 0x4f, 0xed, 0x6d, 0xef, 0x98, 0xee, 0x7b, 0xbb, 0xb4, 0xe5, 0x71, 0x79, + 0xad, 0xca, 0xf7, 0x20, 0xa1, 0x41, 0x29, 0x4b, 0x68, 0x81, 0xe5, 0x36, 0x6d, 0xb6, 0x5c, 0xb2, + 0xe3, 0x1c, 0x13, 0xab, 0x63, 0x35, 0xc9, 0x2e, 0xb5, 0x09, 0x2b, 0xe6, 0x56, 0x73, 0x6b, 0x33, + 0x66, 0xa5, 0xd7, 0x2d, 0x2f, 0x3f, 0xcc, 0xd0, 0x9f, 0x76, 0xcb, 0x4b, 0x19, 0x72, 0x94, 0x09, + 0x06, 0xef, 0x81, 0x79, 0x75, 0x39, 0x9b, 0xd8, 0xc7, 0x96, 0xc3, 0x3b, 0xc5, 0xbc, 0xcc, 0x70, + 0xa9, 0xd7, 0x2d, 0xcf, 0xd7, 0x92, 0x2a, 0x94, 0xb6, 0x85, 0x0f, 0xc0, 0xdc, 0x31, 0xfb, 0x30, + 0xa0, 0x2d, 0xff, 0x80, 0x36, 0x1d, 0xab, 0x53, 0x9c, 0x58, 0xd5, 0xd6, 0x66, 0x4c, 0xbd, 0xd7, + 0x2d, 0xcf, 0xdd, 0xaf, 0xc5, 0x14, 0xa7, 0x69, 0x01, 0x4a, 0x3a, 0xc2, 0x47, 0x60, 0x8e, 0xd3, + 0x06, 0xf1, 0xc4, 0xd5, 0x11, 0xc6, 0x59, 0x71, 0x52, 0x96, 0xf1, 0xca, 0x88, 0x32, 0x1e, 0xc6, + 0x6c, 0xcd, 0xcb, 0xaa, 0x92, 0x73, 0x71, 0x29, 0x43, 0x49, 0x40, 0xb8, 0x09, 0x16, 0x83, 0xb0, + 0x2e, 0x0c, 0x11, 0xbf, 0x75, 0xd4, 0x74, 0xd8, 0x49, 0x71, 0x4a, 0x1e, 0xf6, 0x72, 0xaf, 0x5b, + 0x5e, 0x44, 0x69, 0x25, 0x1a, 0xb6, 0xd7, 0x7f, 0xd6, 0xc0, 0xd4, 0x66, 0x6d, 0x7b, 0x8f, 0xda, + 0xe4, 
0x02, 0x66, 0x71, 0x2b, 0x31, 0x8b, 0xfa, 0xe8, 0x96, 0x16, 0xf9, 0x8c, 0x9c, 0xc4, 0x7f, + 0xc3, 0x49, 0x14, 0x36, 0x8a, 0x45, 0x56, 0x41, 0xde, 0xc3, 0x2e, 0x91, 0x59, 0xcf, 0x44, 0x3e, + 0x7b, 0xd8, 0x25, 0x48, 0x6a, 0xe0, 0x9b, 0x60, 0xd2, 0xa3, 0x36, 0xd9, 0xde, 0x92, 0xb1, 0x67, + 0xcc, 0x4b, 0xca, 0x66, 0x72, 0x4f, 0x4a, 0x91, 0xd2, 0xc2, 0x5b, 0x60, 0x96, 0x53, 0x9f, 0x36, + 0x69, 0xbd, 0xf3, 0x11, 0xe9, 0xf4, 0x9b, 0x73, 0xa1, 0xd7, 0x2d, 0xcf, 0x1e, 0xc6, 0xe4, 0x28, + 0x61, 0x05, 0x3f, 0x07, 0x05, 0xdc, 0x6c, 0x52, 0x0b, 0x73, 0x7c, 0xd4, 0x24, 0xb2, 0xe3, 0x0a, + 0x37, 0xaf, 0x8d, 0x38, 0x5e, 0xd8, 0xcc, 0x22, 0x2e, 0x22, 0x8c, 0xb6, 0x02, 0x8b, 0x30, 0x73, + 0xbe, 0xd7, 0x2d, 0x17, 0x36, 0x22, 0x08, 0x14, 0xc7, 0xd3, 0x7f, 0xd2, 0x40, 0x41, 0x1d, 0xf8, + 0x02, 0x88, 0x67, 0x33, 0x49, 0x3c, 0xa5, 0xb3, 0xab, 0x34, 0x82, 0x76, 0xbe, 0x18, 0x64, 0x2c, + 0x39, 0x67, 0x1f, 0x4c, 0xd9, 0xb2, 0x54, 0xac, 0xa8, 0x49, 0xd4, 0xab, 0x67, 0xa3, 0x2a, 0x4a, + 0x9b, 0x57, 0xd8, 0x53, 0xe1, 0x37, 0x43, 0x7d, 0x14, 0xfd, 0xdb, 0x49, 0x30, 0xdb, 0x9f, 0xe6, + 0x26, 0x66, 0xec, 0x02, 0x9a, 0xf7, 0x7d, 0x50, 0xf0, 0x03, 0xda, 0x76, 0x98, 0x43, 0x3d, 0x12, + 0xa8, 0x3e, 0x5a, 0x52, 0x2e, 0x85, 0x83, 0x48, 0x85, 0xe2, 0x76, 0xb0, 0x0e, 0x80, 0x8f, 0x03, + 0xec, 0x12, 0x2e, 0x4e, 0x9f, 0x93, 0xa7, 0x7f, 0x6f, 0xc4, 0xe9, 0xe3, 0x27, 0x32, 0x0e, 0x06, + 0x5e, 0x55, 0x8f, 0x07, 0x9d, 0x28, 0xbb, 0x48, 0x81, 0x62, 0xd0, 0xb0, 0x01, 0xe6, 0x02, 0x62, + 0x35, 0xb1, 0xe3, 0x2a, 0xee, 0xca, 0xcb, 0x0c, 0xab, 0x82, 0x48, 0x50, 0x5c, 0x71, 0xda, 0x2d, + 0xdf, 0x18, 0x7e, 0x47, 0x18, 0x07, 0x24, 0x60, 0x0e, 0xe3, 0xc4, 0xe3, 0x61, 0x87, 0x26, 0x7c, + 0x50, 0x12, 0x5b, 0xcc, 0x89, 0x2b, 0x58, 0x7d, 0xdf, 0xe7, 0x0e, 0xf5, 0x58, 0x71, 0x22, 0x9a, + 0x93, 0xdd, 0x98, 0x1c, 0x25, 0xac, 0xe0, 0x0e, 0x58, 0x16, 0x7d, 0xfd, 0x65, 0x18, 0xa0, 0xfa, + 0xd4, 0xc7, 0x9e, 0xb8, 0xa5, 0xe2, 0xa4, 0x64, 0xad, 0xa2, 0x58, 0x01, 0x1b, 0x19, 0x7a, 0x94, + 0xe9, 0x05, 0x3f, 0x01, 0x8b, 0xe1, 0x0e, 0x30, 0x1d, 0xcf, 0x76, 0xbc, 0xba, 0xd8, 0x00, 0x92, + 0x00, 0x67, 0xcc, 0x6b, 0x82, 0x00, 0x1f, 0xa6, 0x95, 0xa7, 0x59, 0x42, 0x34, 0x0c, 0x02, 0x9f, + 0x80, 0x45, 0x19, 0x91, 0xd8, 0x6a, 0xe8, 0x1d, 0xc2, 0x8a, 0xd3, 0xb2, 0x74, 0x6b, 0xf1, 0xd2, + 0x89, 0xab, 0x0b, 0xd9, 0x3b, 0x24, 0x83, 0x1a, 0x69, 0x12, 0x8b, 0xd3, 0xe0, 0x90, 0x04, 0xae, + 0xf9, 0x86, 0xaa, 0xd7, 0xe2, 0x46, 0x1a, 0x0a, 0x0d, 0xa3, 0xaf, 0xdc, 0x03, 0xf3, 0xa9, 0x82, + 0xc3, 0x05, 0x90, 0x6b, 0x90, 0x4e, 0x48, 0x6a, 0x48, 0xfc, 0x84, 0xcb, 0x60, 0xa2, 0x8d, 0x9b, + 0x2d, 0x12, 0x36, 0x1f, 0x0a, 0x3f, 0xee, 0x8e, 0xdf, 0xd1, 0xf4, 0xdf, 0x34, 0xb0, 0x10, 0xef, + 0x9e, 0x0b, 0xe0, 0x89, 0x07, 0x49, 0x9e, 0xb8, 0x72, 0x8e, 0x9e, 0x1e, 0x41, 0x16, 0x5f, 0x6b, + 0x60, 0x36, 0xbe, 0xea, 0xe0, 0xbb, 0x60, 0x1a, 0xb7, 0x6c, 0x87, 0x78, 0x56, 0x9f, 0xd3, 0x07, + 0x89, 0x6c, 0x28, 0x39, 0x1a, 0x58, 0x88, 0x45, 0x48, 0x9e, 0xfa, 0x4e, 0x80, 0x45, 0x93, 0xd5, + 0x88, 0x45, 0x3d, 0x9b, 0xc9, 0x1b, 0xca, 0x85, 0x8b, 0xb0, 0x9a, 0x56, 0xa2, 0x61, 0x7b, 0xfd, + 0x87, 0x71, 0xb0, 0x10, 0xf6, 0x46, 0xf8, 0x04, 0x72, 0x89, 0xc7, 0x2f, 0x80, 0x54, 0x76, 0x13, + 0x1b, 0xf1, 0x9d, 0x33, 0x57, 0x46, 0x94, 0xd8, 0xa8, 0xd5, 0x08, 0x3f, 0x06, 0x93, 0x8c, 0x63, + 0xde, 0x12, 0x44, 0x23, 0x00, 0xaf, 0x9f, 0x17, 0x50, 0x3a, 0x45, 0x5b, 0x31, 0xfc, 0x46, 0x0a, + 0x4c, 0xff, 0x5d, 0x03, 0xcb, 0x69, 0x97, 0x0b, 0xe8, 0xb0, 0x9d, 0x64, 0x87, 0xbd, 0x75, 0xce, + 0xc3, 0x8c, 0xe8, 0xb2, 0x3f, 0x35, 0xf0, 0xbf, 0xa1, 0x73, 0xcb, 0xfd, 0x2b, 0x78, 0xc9, 0x4f, + 0xb1, 0xdf, 0x5e, 0xf4, 0x9e, 
0x90, 0xbc, 0x74, 0x90, 0xa1, 0x47, 0x99, 0x5e, 0xf0, 0x31, 0x58, + 0x70, 0xbc, 0xa6, 0xe3, 0x91, 0x50, 0x56, 0x8b, 0xea, 0x9b, 0x49, 0x1e, 0x69, 0x64, 0x59, 0xdc, + 0xe5, 0x5e, 0xb7, 0xbc, 0xb0, 0x9d, 0x42, 0x41, 0x43, 0xb8, 0xfa, 0x1f, 0x19, 0x95, 0x91, 0x1b, + 0x57, 0x8c, 0x90, 0x94, 0x90, 0x60, 0x68, 0x84, 0x94, 0x1c, 0x0d, 0x2c, 0x64, 0xdf, 0xc8, 0xab, + 0x50, 0x89, 0x9e, 0xbb, 0x6f, 0xa4, 0x53, 0xac, 0x6f, 0xe4, 0x37, 0x52, 0x60, 0x22, 0x09, 0xf1, + 0xae, 0x92, 0x77, 0x99, 0x4b, 0x26, 0xb1, 0xa7, 0xe4, 0x68, 0x60, 0xa1, 0xff, 0x93, 0xcb, 0x28, + 0x90, 0x6c, 0xc0, 0xd8, 0x69, 0xfa, 0xff, 0x56, 0xd2, 0xa7, 0xb1, 0x07, 0xa7, 0xb1, 0xe1, 0xf7, + 0x1a, 0x80, 0x78, 0x00, 0xb1, 0xdb, 0x6f, 0xd0, 0xb0, 0x8b, 0xaa, 0xaf, 0x35, 0x12, 0xc6, 0xc6, + 0x10, 0x4e, 0xb8, 0x8d, 0x57, 0x54, 0x7c, 0x38, 0x6c, 0x80, 0x32, 0x82, 0x43, 0x1b, 0x14, 0x42, + 0x69, 0x35, 0x08, 0x68, 0xa0, 0xc6, 0x53, 0x3f, 0x33, 0x17, 0x69, 0x69, 0x96, 0xe4, 0xd3, 0x30, + 0x72, 0x3d, 0xed, 0x96, 0x0b, 0x31, 0x3d, 0x8a, 0xc3, 0x8a, 0x28, 0x36, 0x89, 0xa2, 0xe4, 0x5f, + 0x2f, 0xca, 0x16, 0x19, 0x1d, 0x25, 0x06, 0xbb, 0x52, 0x05, 0xff, 0x1f, 0x71, 0x2d, 0xaf, 0xb5, + 0xb3, 0xbe, 0xd1, 0x40, 0x3c, 0x06, 0xdc, 0x01, 0x79, 0xee, 0xa8, 0xa9, 0x4b, 0x3e, 0x9f, 0xcf, + 0x20, 0x92, 0x43, 0xc7, 0x25, 0x11, 0x15, 0x8a, 0x2f, 0x24, 0x51, 0xe0, 0xdb, 0x60, 0xca, 0x25, + 0x8c, 0xe1, 0xba, 0x8a, 0x1c, 0x3d, 0x26, 0x77, 0x43, 0x31, 0xea, 0xeb, 0xf5, 0xdb, 0x60, 0x29, + 0xe3, 0x51, 0x0e, 0xcb, 0x60, 0xc2, 0x92, 0xff, 0x71, 0x45, 0x42, 0x13, 0xe6, 0x8c, 0x60, 0x94, + 0x4d, 0xf9, 0xd7, 0x36, 0x94, 0x9b, 0x6b, 0xcf, 0x5e, 0x96, 0xc6, 0x9e, 0xbf, 0x2c, 0x8d, 0xbd, + 0x78, 0x59, 0x1a, 0xfb, 0xaa, 0x57, 0xd2, 0x9e, 0xf5, 0x4a, 0xda, 0xf3, 0x5e, 0x49, 0x7b, 0xd1, + 0x2b, 0x69, 0x7f, 0xf5, 0x4a, 0xda, 0x77, 0x7f, 0x97, 0xc6, 0x3e, 0x1d, 0x6f, 0xaf, 0xff, 0x17, + 0x00, 0x00, 0xff, 0xff, 0x02, 0xb2, 0x6f, 0xe2, 0x3e, 0x12, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -721,6 +756,30 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RequiresRepublish != nil { + i-- + if *m.RequiresRepublish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TokenRequests) > 0 { + for iNdEx := len(m.TokenRequests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.FSGroupPolicy != nil { i -= len(*m.FSGroupPolicy) copy(dAtA[i:], *m.FSGroupPolicy) @@ -1107,6 +1166,39 @@ func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeAttachment) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1503,6 +1595,15 @@ func (m *CSIDriverSpec) Size() (n int) { l = len(*m.FSGroupPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.TokenRequests) > 0 { + for _, e := range m.TokenRequests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.RequiresRepublish != nil { + n += 2 + } return n } @@ -1635,6 +1736,20 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + func (m *VolumeAttachment) Size() (n int) { if m == nil { return 0 @@ -1787,12 +1902,19 @@ func (this *CSIDriverSpec) String() string { if this == nil { return "nil" } + repeatedStringForTokenRequests := "[]TokenRequest{" + for _, f := range this.TokenRequests { + repeatedStringForTokenRequests += strings.Replace(strings.Replace(f.String(), "TokenRequest", "TokenRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForTokenRequests += "}" s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `StorageCapacity:` + valueToStringGenerated(this.StorageCapacity) + `,`, `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, + `TokenRequests:` + repeatedStringForTokenRequests + `,`, + `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, `}`, }, "") return s @@ -1900,6 +2022,17 @@ func (this *StorageClassList) String() string { }, "") return s } +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *VolumeAttachment) String() string { if this == nil { return "nil" @@ -2104,10 +2237,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2224,10 +2354,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2399,16 +2526,68 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { s := FSGroupPolicy(dAtA[iNdEx:postIndex]) m.FSGroupPolicy = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenRequests = 
append(m.TokenRequests, TokenRequest{}) + if err := m.TokenRequests[len(m.TokenRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresRepublish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RequiresRepublish = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2524,10 +2703,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2709,10 +2885,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2829,10 +3002,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2916,10 +3086,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3144,7 +3311,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3314,10 +3481,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3434,10 +3598,109 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3586,10 +3849,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3706,10 +3966,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3828,10 +4085,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3978,10 +4232,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4161,7 +4412,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -4250,10 +4501,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4368,10 +4616,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4441,10 +4686,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { if err != 
nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index a3526ca4e4..d6b4d9cbd0 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1; @@ -146,6 +146,43 @@ message CSIDriverSpec { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional optional string fsGroupPolicy = 5; + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + repeated TokenRequest tokenRequests = 6; + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + optional bool requiresRepublish = 7; } // CSINode holds information about all CSI drivers installed on a node. @@ -281,6 +318,19 @@ message StorageClassList { repeated StorageClass items = 2; } +// TokenRequest contains parameters of a service account token. +message TokenRequest { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + optional string audience = 1; + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". + // + // +optional + optional int64 expirationSeconds = 2; +} + // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. // diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 27e06debb1..18d0fb8772 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -344,6 +344,43 @@ type CSIDriverSpec struct { // that enable the CSIVolumeFSGroupPolicy feature gate. 
// +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -381,6 +418,20 @@ const ( // provided by a CSI driver. More modes may be added in the future. type VolumeLifecycleMode string +// TokenRequest contains parameters of a service account token. +type TokenRequest struct { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + // + Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". + // + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` +} + const ( // VolumeLifecyclePersistent explicitly confirms that the driver implements // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 606cda4dbc..0e28b1d2f1 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -54,6 +54,8 @@ var map_CSIDriverSpec = map[string]string{ "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. 
A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.", "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false.", "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -127,6 +129,16 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_TokenRequest = map[string]string{ + "": "TokenRequest contains parameters of a service account token.", + "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 5eb0225a06..f4de942161 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -113,6 +113,18 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(FSGroupPolicy) **out = **in } + if in.TokenRequests != nil { + in, out := &in.TokenRequests, &out.TokenRequests + *out = make([]TokenRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequiresRepublish != nil { + in, out := &in.RequiresRepublish, &out.RequiresRepublish + *out = new(bool) + **out = **in + } return } @@ -328,6 +340,27 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 1b7767fdcc..38d3573990 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -1210,10 +1210,7 @@ func (m *CSIStorageCapacity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1330,10 +1327,7 @@ func (m *CSIStorageCapacityList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1482,10 +1476,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1602,10 +1593,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1724,10 +1712,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1874,10 +1859,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2057,7 +2039,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -2146,10 +2128,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2264,10 +2243,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 40a7640518..d64345333d 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1alpha1; diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index cec77515e1..328d721456 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -298,10 +298,38 @@ func (m *StorageClassList) XXX_DiscardUnknown() { var xxx_messageInfo_StorageClassList proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{9} + return fileDescriptor_7d2980599fd0de80, []int{10} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +357,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{10} + return fileDescriptor_7d2980599fd0de80, []int{11} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{11} + return fileDescriptor_7d2980599fd0de80, []int{12} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{12} + return fileDescriptor_7d2980599fd0de80, []int{13} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{13} + return fileDescriptor_7d2980599fd0de80, []int{14} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +469,7 @@ var xxx_messageInfo_VolumeAttachmentStatus 
proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{14} + return fileDescriptor_7d2980599fd0de80, []int{15} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +497,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{15} + return fileDescriptor_7d2980599fd0de80, []int{16} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -505,6 +533,7 @@ func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1beta1.TokenRequest") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") @@ -520,95 +549,102 @@ func init() { } var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1400 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x3d, 0x6f, 0xdb, 0x46, - 0x1f, 0x37, 0x2d, 0xc9, 0x2f, 0x27, 0x3b, 0x96, 0xcf, 0xc6, 0xf3, 0xe8, 0xd1, 0x20, 0x1a, 0x7a, - 0xd0, 0xc6, 0x09, 0x12, 0x2a, 0x31, 0xd2, 0x20, 0x08, 0x90, 0xc1, 0x72, 0xdc, 0x46, 0x89, 0xe5, - 0xb8, 0x27, 0x23, 0x28, 0x82, 0x0e, 0x3d, 0x91, 0x67, 0x99, 0xb1, 0xc8, 0x63, 0xc8, 0x93, 0x5a, - 0x6d, 0x9d, 0x3a, 0x17, 0x1d, 0xfa, 0x09, 0xfa, 0x15, 0x5a, 0xa0, 0x5d, 0x3a, 0x36, 0x53, 0x11, - 0x74, 0xca, 0x44, 0x34, 0xec, 0x47, 0x28, 0xba, 0x18, 0x1d, 0x8a, 0x3b, 0x9e, 0xc4, 0x17, 0x51, - 0xb1, 0xdd, 0xc1, 0x1b, 0xef, 0xff, 0xf2, 0xfb, 0xbf, 0xff, 0xef, 0x08, 0x76, 0x4e, 0xee, 0x79, - 0x9a, 0x49, 0xeb, 0x27, 0xfd, 0x0e, 0x71, 0x6d, 0xc2, 0x88, 0x57, 0x1f, 0x10, 0xdb, 0xa0, 0x6e, - 0x5d, 0x32, 0xb0, 0x63, 0xd6, 0x3d, 0x46, 0x5d, 0xdc, 0x25, 0xf5, 0xc1, 0xed, 0x0e, 0x61, 0xf8, - 0x76, 0xbd, 0x4b, 0x6c, 0xe2, 0x62, 0x46, 0x0c, 0xcd, 0x71, 0x29, 0xa3, 0xb0, 0x12, 0xca, 0x6a, - 0xd8, 0x31, 0x35, 0x29, 0xab, 0x49, 0xd9, 0xca, 0xcd, 0xae, 0xc9, 0x8e, 0xfb, 0x1d, 0x4d, 0xa7, - 0x56, 0xbd, 0x4b, 0xbb, 0xb4, 0x2e, 0x54, 0x3a, 0xfd, 0x23, 0x71, 0x12, 0x07, 0xf1, 0x15, 0x42, - 0x55, 0x6a, 0x31, 0xb3, 0x3a, 0x75, 0xb9, 0xcd, 0xb4, 0xb9, 0xca, 0x9d, 0x48, 0xc6, 0xc2, 0xfa, - 0xb1, 0x69, 0x13, 0x77, 0x58, 0x77, 0x4e, 0xba, 0x9c, 0xe0, 0xd5, 0x2d, 0xc2, 0x70, 0x96, 0x56, - 0x7d, 0x9a, 0x96, 0xdb, 0xb7, 0x99, 0x69, 0x91, 0x09, 0x85, 0xbb, 0x67, 0x29, 0x78, 0xfa, 0x31, - 0xb1, 0x70, 0x5a, 0xaf, 0xf6, 0x93, 0x02, 0x16, 0x77, 0xda, 0xcd, 0x87, 0xae, 0x39, 0x20, 0x2e, - 0xfc, 0x0c, 0x2c, 0x70, 0x8f, 0x0c, 0xcc, 0x70, 0x59, 0xd9, 0x50, 0x36, 0x8b, 0x5b, 0xb7, 0xb4, - 0x28, 0x5d, 0x63, 0x60, 0xcd, 0x39, 0xe9, 0x72, 0x82, 0xa7, 0x71, 0x69, 0x6d, 0x70, 0x5b, 0x7b, - 0xda, 0x79, 0x41, 0x74, 0xd6, 0x22, 0x0c, 0x37, 0xe0, 0x2b, 0x5f, 0x9d, 0x09, 0x7c, 0x15, 0x44, - 0x34, 0x34, 0x46, 0x85, 0x4f, 0x40, 0xde, 0x73, 0x88, 0x5e, 0x9e, 
0x15, 0xe8, 0xd7, 0xb4, 0xe9, - 0xc5, 0xd0, 0xc6, 0x6e, 0xb5, 0x1d, 0xa2, 0x37, 0x96, 0x24, 0x6c, 0x9e, 0x9f, 0x90, 0x00, 0xa9, - 0xfd, 0xa8, 0x80, 0xe5, 0xb1, 0xd4, 0x9e, 0xe9, 0x31, 0xf8, 0xe9, 0x44, 0x00, 0xda, 0xf9, 0x02, - 0xe0, 0xda, 0xc2, 0xfd, 0x92, 0xb4, 0xb3, 0x30, 0xa2, 0xc4, 0x9c, 0x7f, 0x0c, 0x0a, 0x26, 0x23, - 0x96, 0x57, 0x9e, 0xdd, 0xc8, 0x6d, 0x16, 0xb7, 0xde, 0x3b, 0x97, 0xf7, 0x8d, 0x65, 0x89, 0x58, - 0x68, 0x72, 0x5d, 0x14, 0x42, 0xd4, 0xfe, 0x9a, 0x8d, 0xf9, 0xce, 0x63, 0x82, 0xf7, 0xc1, 0x15, - 0xcc, 0x18, 0xd6, 0x8f, 0x11, 0x79, 0xd9, 0x37, 0x5d, 0x62, 0x88, 0x08, 0x16, 0x1a, 0x30, 0xf0, - 0xd5, 0x2b, 0xdb, 0x09, 0x0e, 0x4a, 0x49, 0x72, 0x5d, 0x87, 0x1a, 0x4d, 0xfb, 0x88, 0x3e, 0xb5, - 0x5b, 0xb4, 0x6f, 0x33, 0x91, 0x60, 0xa9, 0x7b, 0x90, 0xe0, 0xa0, 0x94, 0x24, 0xd4, 0xc1, 0xfa, - 0x80, 0xf6, 0xfa, 0x16, 0xd9, 0x33, 0x8f, 0x88, 0x3e, 0xd4, 0x7b, 0xa4, 0x45, 0x0d, 0xe2, 0x95, - 0x73, 0x1b, 0xb9, 0xcd, 0xc5, 0x46, 0x3d, 0xf0, 0xd5, 0xf5, 0x67, 0x19, 0xfc, 0x53, 0x5f, 0x5d, - 0xcb, 0xa0, 0xa3, 0x4c, 0x30, 0xf8, 0x00, 0xac, 0xc8, 0x0c, 0xed, 0x60, 0x07, 0xeb, 0x26, 0x1b, - 0x96, 0xf3, 0xc2, 0xc3, 0xb5, 0xc0, 0x57, 0x57, 0xda, 0x49, 0x16, 0x4a, 0xcb, 0xc2, 0x47, 0x60, - 0xf9, 0xc8, 0xfb, 0xc8, 0xa5, 0x7d, 0xe7, 0x80, 0xf6, 0x4c, 0x7d, 0x58, 0x2e, 0x6c, 0x28, 0x9b, - 0x8b, 0x8d, 0x5a, 0xe0, 0xab, 0xcb, 0x1f, 0xb6, 0x63, 0x8c, 0xd3, 0x34, 0x01, 0x25, 0x15, 0x6b, - 0x3f, 0x28, 0x60, 0x7e, 0xa7, 0xdd, 0xdc, 0xa7, 0x06, 0xb9, 0x84, 0x76, 0x6f, 0x26, 0xda, 0xfd, - 0xea, 0x19, 0x0d, 0xc3, 0x9d, 0x9a, 0xda, 0xec, 0x7f, 0x86, 0xcd, 0xce, 0x65, 0xe4, 0xb4, 0x6e, - 0x80, 0xbc, 0x8d, 0x2d, 0x22, 0x5c, 0x5f, 0x8c, 0x74, 0xf6, 0xb1, 0x45, 0x90, 0xe0, 0xc0, 0xf7, - 0xc1, 0x9c, 0x4d, 0x0d, 0xd2, 0x7c, 0x28, 0x1c, 0x58, 0x6c, 0x5c, 0x91, 0x32, 0x73, 0xfb, 0x82, - 0x8a, 0x24, 0x17, 0xde, 0x01, 0x4b, 0x8c, 0x3a, 0xb4, 0x47, 0xbb, 0xc3, 0x27, 0x64, 0x38, 0x2a, - 0x7d, 0x29, 0xf0, 0xd5, 0xa5, 0xc3, 0x18, 0x1d, 0x25, 0xa4, 0x60, 0x07, 0x14, 0x71, 0xaf, 0x47, - 0x75, 0xcc, 0x70, 0xa7, 0x47, 0x44, 0x3d, 0x8b, 0x5b, 0xf5, 0x77, 0xc5, 0x18, 0xf6, 0x0b, 0x37, - 0x8e, 0x88, 0x47, 0xfb, 0xae, 0x4e, 0xbc, 0xc6, 0x4a, 0xe0, 0xab, 0xc5, 0xed, 0x08, 0x07, 0xc5, - 0x41, 0x6b, 0xdf, 0x2b, 0xa0, 0x28, 0xa3, 0xbe, 0x84, 0x01, 0x7f, 0x94, 0x1c, 0xf0, 0xff, 0x9f, - 0xa3, 0x5e, 0x53, 0xc6, 0x5b, 0x1f, 0xbb, 0x2d, 0x66, 0xfb, 0x10, 0xcc, 0x1b, 0xa2, 0x68, 0x5e, - 0x59, 0x11, 0xd0, 0xd7, 0xce, 0x01, 0x2d, 0xf7, 0xc7, 0x8a, 0x34, 0x30, 0x1f, 0x9e, 0x3d, 0x34, - 0x82, 0xaa, 0x7d, 0x33, 0x07, 0x96, 0x46, 0xa3, 0xd3, 0xc3, 0x9e, 0x77, 0x09, 0x0d, 0xfd, 0x01, - 0x28, 0x3a, 0x2e, 0x1d, 0x98, 0x9e, 0x49, 0x6d, 0xe2, 0xca, 0xb6, 0x5a, 0x93, 0x2a, 0xc5, 0x83, - 0x88, 0x85, 0xe2, 0x72, 0xb0, 0x07, 0x80, 0x83, 0x5d, 0x6c, 0x11, 0xc6, 0x53, 0x90, 0x13, 0x29, - 0xb8, 0xf7, 0xae, 0x14, 0xc4, 0xc3, 0xd2, 0x0e, 0xc6, 0xaa, 0xbb, 0x36, 0x73, 0x87, 0x91, 0x8b, - 0x11, 0x03, 0xc5, 0xf0, 0xe1, 0x09, 0x58, 0x76, 0x89, 0xde, 0xc3, 0xa6, 0x25, 0xb7, 0x45, 0x5e, - 0xb8, 0xb9, 0xcb, 0xb7, 0x05, 0x8a, 0x33, 0x4e, 0x7d, 0xf5, 0xd6, 0xe4, 0x1d, 0xae, 0x1d, 0x10, - 0xd7, 0x33, 0x3d, 0x46, 0x6c, 0x16, 0x36, 0x6c, 0x42, 0x07, 0x25, 0xb1, 0xf9, 0xec, 0x58, 0x7c, - 0x8f, 0x3e, 0x75, 0x98, 0x49, 0x6d, 0xaf, 0x5c, 0x88, 0x66, 0xa7, 0x15, 0xa3, 0xa3, 0x84, 0x14, - 0xdc, 0x03, 0xeb, 0xbc, 0xcd, 0x3f, 0x0f, 0x0d, 0xec, 0x7e, 0xe1, 0x60, 0x9b, 0xa7, 0xaa, 0x3c, - 0x27, 0x96, 0x62, 0x99, 0x2f, 0xdd, 0xed, 0x0c, 0x3e, 0xca, 0xd4, 0x82, 0x9f, 0x80, 0xd5, 0x70, - 0xeb, 0x36, 0x4c, 0xdb, 0x30, 0xed, 0x2e, 0xdf, 0xb9, 0xe5, 0x79, 0x11, 0xf4, 0xf5, 0xc0, 
0x57, - 0x57, 0x9f, 0xa5, 0x99, 0xa7, 0x59, 0x44, 0x34, 0x09, 0x02, 0x5f, 0x82, 0x55, 0x61, 0x91, 0x18, - 0x72, 0x11, 0x98, 0xc4, 0x2b, 0x2f, 0x88, 0xfa, 0x6d, 0xc6, 0xeb, 0xc7, 0x53, 0xc7, 0x1b, 0x69, - 0xb4, 0x2e, 0xda, 0xa4, 0x47, 0x74, 0x46, 0xdd, 0x43, 0xe2, 0x5a, 0x8d, 0xff, 0xc9, 0x7a, 0xad, - 0x6e, 0xa7, 0xa1, 0xd0, 0x24, 0x7a, 0xe5, 0x01, 0x58, 0x49, 0x15, 0x1c, 0x96, 0x40, 0xee, 0x84, - 0x0c, 0xc3, 0x45, 0x87, 0xf8, 0x27, 0x5c, 0x07, 0x85, 0x01, 0xee, 0xf5, 0x49, 0xd8, 0x81, 0x28, - 0x3c, 0xdc, 0x9f, 0xbd, 0xa7, 0xd4, 0x7e, 0x56, 0x40, 0x29, 0xde, 0x3d, 0x97, 0xb0, 0x36, 0x5a, - 0xc9, 0xb5, 0xb1, 0x79, 0xde, 0xc6, 0x9e, 0xb2, 0x3b, 0xbe, 0x9b, 0x05, 0xa5, 0xb0, 0x38, 0xe1, - 0xad, 0x6f, 0x11, 0x9b, 0x5d, 0xc2, 0x68, 0xa3, 0xc4, 0x5d, 0x75, 0xeb, 0xec, 0x3d, 0x1e, 0x79, - 0x37, 0xed, 0xd2, 0x82, 0xcf, 0xc1, 0x9c, 0xc7, 0x30, 0xeb, 0xf3, 0x99, 0xe7, 0xa8, 0x5b, 0x17, - 0x42, 0x15, 0x9a, 0xd1, 0xa5, 0x15, 0x9e, 0x91, 0x44, 0xac, 0xfd, 0xa2, 0x80, 0xf5, 0xb4, 0xca, - 0x25, 0x14, 0xfb, 0xe3, 0x64, 0xb1, 0x6f, 0x5c, 0x24, 0xa2, 0x29, 0x05, 0xff, 0x4d, 0x01, 0xff, - 0x99, 0x08, 0x5e, 0x5c, 0x8f, 0x7c, 0x4f, 0x38, 0xa9, 0x6d, 0xb4, 0x1f, 0xdd, 0xf9, 0x62, 0x4f, - 0x1c, 0x64, 0xf0, 0x51, 0xa6, 0x16, 0x7c, 0x01, 0x4a, 0xa6, 0xdd, 0x33, 0x6d, 0x12, 0xd2, 0xda, - 0x51, 0xb9, 0x33, 0x87, 0x39, 0x8d, 0x2c, 0xca, 0xbc, 0x1e, 0xf8, 0x6a, 0xa9, 0x99, 0x42, 0x41, - 0x13, 0xb8, 0xb5, 0x5f, 0x33, 0xca, 0x23, 0xee, 0xc2, 0x1b, 0x60, 0x21, 0x7c, 0xbd, 0x12, 0x57, - 0x86, 0x31, 0x4e, 0xf7, 0xb6, 0xa4, 0xa3, 0xb1, 0x84, 0xe8, 0x20, 0x91, 0x0a, 0xe9, 0xe8, 0xc5, - 0x3a, 0x48, 0x68, 0xc6, 0x3a, 0x48, 0x9c, 0x91, 0x44, 0xe4, 0x9e, 0xf0, 0x07, 0x90, 0x48, 0x68, - 0x2e, 0xe9, 0xc9, 0xbe, 0xa4, 0xa3, 0xb1, 0x44, 0xed, 0xef, 0x5c, 0x46, 0x95, 0x44, 0x2b, 0xc6, - 0x42, 0x1a, 0x3d, 0xda, 0xd3, 0x21, 0x19, 0xe3, 0x90, 0x0c, 0xf8, 0xad, 0x02, 0x20, 0x1e, 0x43, - 0xb4, 0x46, 0xad, 0x1a, 0xf6, 0xd3, 0xe3, 0x8b, 0x4f, 0x88, 0xb6, 0x3d, 0x01, 0x16, 0xde, 0x93, - 0x15, 0xe9, 0x04, 0x9c, 0x14, 0x40, 0x19, 0x1e, 0x40, 0x13, 0x14, 0x43, 0xea, 0xae, 0xeb, 0x52, - 0x57, 0x8e, 0xec, 0xd5, 0xb3, 0x1d, 0x12, 0xe2, 0x8d, 0xaa, 0x78, 0xc8, 0x45, 0xfa, 0xa7, 0xbe, - 0x5a, 0x8c, 0xf1, 0x51, 0x1c, 0x9b, 0x9b, 0x32, 0x48, 0x64, 0x2a, 0xff, 0x2f, 0x4c, 0x3d, 0x24, - 0xd3, 0x4d, 0xc5, 0xb0, 0x2b, 0xbb, 0xe0, 0xbf, 0x53, 0x12, 0x74, 0xa1, 0x7b, 0xe5, 0x2b, 0x05, - 0xc4, 0x6d, 0xc0, 0x3d, 0x90, 0xe7, 0x3f, 0xd6, 0x72, 0xc3, 0x5c, 0x3f, 0xdf, 0x86, 0x39, 0x34, - 0x2d, 0x12, 0x2d, 0x4a, 0x7e, 0x42, 0x02, 0x05, 0x5e, 0x03, 0xf3, 0x16, 0xf1, 0x3c, 0xdc, 0x95, - 0x96, 0xa3, 0x57, 0x5f, 0x2b, 0x24, 0xa3, 0x11, 0xbf, 0x76, 0x17, 0xac, 0x65, 0xbc, 0xa3, 0xa1, - 0x0a, 0x0a, 0xba, 0xf8, 0xf3, 0xe3, 0x0e, 0x15, 0x1a, 0x8b, 0x7c, 0xcb, 0xec, 0x88, 0x1f, 0xbe, - 0x90, 0xde, 0xb8, 0xf9, 0xea, 0x6d, 0x75, 0xe6, 0xf5, 0xdb, 0xea, 0xcc, 0x9b, 0xb7, 0xd5, 0x99, - 0x2f, 0x83, 0xaa, 0xf2, 0x2a, 0xa8, 0x2a, 0xaf, 0x83, 0xaa, 0xf2, 0x26, 0xa8, 0x2a, 0xbf, 0x07, - 0x55, 0xe5, 0xeb, 0x3f, 0xaa, 0x33, 0xcf, 0xe7, 0x65, 0xbe, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, - 0x4b, 0x3f, 0x49, 0x6e, 0x6d, 0x11, 0x00, 0x00, + // 1508 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0x1b, 0x47, + 0x16, 0xd7, 0x8a, 0xd4, 0xd7, 0x50, 0xb2, 0xa4, 0x91, 0x7c, 0xc7, 0x53, 0x41, 0x0a, 0x3c, 0xdc, + 0x59, 0x36, 0xec, 0xa5, 0x2d, 0xf8, 0x0c, 0xc3, 0x80, 0x0b, 0xad, 0xac, 0x3b, 0xcb, 0x96, 0x64, + 0xdd, 0x50, 0x30, 0x0e, 0xc6, 0x15, 0x19, 0xee, 0x3e, 0x51, 0x6b, 0x71, 0x77, 0xd6, 0x3b, 0x43, + 0xc5, 0xec, 0x92, 
0x26, 0x75, 0x90, 0x22, 0x7d, 0x80, 0xfc, 0x0b, 0x09, 0x90, 0x34, 0x29, 0xe3, + 0x2a, 0x30, 0x52, 0xb9, 0x22, 0x62, 0xe6, 0x4f, 0x48, 0x27, 0xa4, 0x08, 0x66, 0x76, 0xc8, 0xfd, + 0x20, 0x69, 0x49, 0x29, 0xd4, 0x71, 0xde, 0xc7, 0xef, 0xbd, 0x79, 0xef, 0xcd, 0xef, 0x2d, 0xd1, + 0xe6, 0xf1, 0x7d, 0x6e, 0xba, 0xac, 0x7a, 0xdc, 0xaa, 0x43, 0xe8, 0x83, 0x00, 0x5e, 0x3d, 0x01, + 0xdf, 0x61, 0x61, 0x55, 0x2b, 0x68, 0xe0, 0x56, 0xb9, 0x60, 0x21, 0x6d, 0x40, 0xf5, 0xe4, 0x4e, + 0x1d, 0x04, 0xbd, 0x53, 0x6d, 0x80, 0x0f, 0x21, 0x15, 0xe0, 0x98, 0x41, 0xc8, 0x04, 0xc3, 0x2b, + 0x91, 0xad, 0x49, 0x03, 0xd7, 0xd4, 0xb6, 0xa6, 0xb6, 0x5d, 0xb9, 0xd5, 0x70, 0xc5, 0x51, 0xab, + 0x6e, 0xda, 0xcc, 0xab, 0x36, 0x58, 0x83, 0x55, 0x95, 0x4b, 0xbd, 0x75, 0xa8, 0x4e, 0xea, 0xa0, + 0x7e, 0x45, 0x50, 0x2b, 0x95, 0x44, 0x58, 0x9b, 0x85, 0x32, 0x66, 0x36, 0xdc, 0xca, 0xdd, 0xd8, + 0xc6, 0xa3, 0xf6, 0x91, 0xeb, 0x43, 0xd8, 0xae, 0x06, 0xc7, 0x0d, 0x29, 0xe0, 0x55, 0x0f, 0x04, + 0x1d, 0xe6, 0x55, 0x1d, 0xe5, 0x15, 0xb6, 0x7c, 0xe1, 0x7a, 0x30, 0xe0, 0x70, 0xef, 0x2c, 0x07, + 0x6e, 0x1f, 0x81, 0x47, 0xb3, 0x7e, 0x95, 0xef, 0x0d, 0x34, 0xb3, 0x59, 0xdb, 0x7e, 0x14, 0xba, + 0x27, 0x10, 0xe2, 0x8f, 0xd0, 0xb4, 0xcc, 0xc8, 0xa1, 0x82, 0x16, 0x8d, 0x55, 0x63, 0xad, 0xb0, + 0x7e, 0xdb, 0x8c, 0xcb, 0xd5, 0x07, 0x36, 0x83, 0xe3, 0x86, 0x14, 0x70, 0x53, 0x5a, 0x9b, 0x27, + 0x77, 0xcc, 0x67, 0xf5, 0x97, 0x60, 0x8b, 0x5d, 0x10, 0xd4, 0xc2, 0x6f, 0x3a, 0xe5, 0xb1, 0x6e, + 0xa7, 0x8c, 0x62, 0x19, 0xe9, 0xa3, 0xe2, 0xa7, 0x28, 0xcf, 0x03, 0xb0, 0x8b, 0xe3, 0x0a, 0xfd, + 0xba, 0x39, 0xba, 0x19, 0x66, 0x3f, 0xad, 0x5a, 0x00, 0xb6, 0x35, 0xab, 0x61, 0xf3, 0xf2, 0x44, + 0x14, 0x48, 0xe5, 0x3b, 0x03, 0xcd, 0xf5, 0xad, 0x76, 0x5c, 0x2e, 0xf0, 0xff, 0x07, 0x2e, 0x60, + 0x9e, 0xef, 0x02, 0xd2, 0x5b, 0xa5, 0xbf, 0xa0, 0xe3, 0x4c, 0xf7, 0x24, 0x89, 0xe4, 0x9f, 0xa0, + 0x09, 0x57, 0x80, 0xc7, 0x8b, 0xe3, 0xab, 0xb9, 0xb5, 0xc2, 0xfa, 0x3f, 0xce, 0x95, 0xbd, 0x35, + 0xa7, 0x11, 0x27, 0xb6, 0xa5, 0x2f, 0x89, 0x20, 0x2a, 0x5f, 0xe5, 0x13, 0xb9, 0xcb, 0x3b, 0xe1, + 0x07, 0xe8, 0x0a, 0x15, 0x82, 0xda, 0x47, 0x04, 0x5e, 0xb5, 0xdc, 0x10, 0x1c, 0x75, 0x83, 0x69, + 0x0b, 0x77, 0x3b, 0xe5, 0x2b, 0x1b, 0x29, 0x0d, 0xc9, 0x58, 0x4a, 0xdf, 0x80, 0x39, 0xdb, 0xfe, + 0x21, 0x7b, 0xe6, 0xef, 0xb2, 0x96, 0x2f, 0x54, 0x81, 0xb5, 0xef, 0x7e, 0x4a, 0x43, 0x32, 0x96, + 0xd8, 0x46, 0xcb, 0x27, 0xac, 0xd9, 0xf2, 0x60, 0xc7, 0x3d, 0x04, 0xbb, 0x6d, 0x37, 0x61, 0x97, + 0x39, 0xc0, 0x8b, 0xb9, 0xd5, 0xdc, 0xda, 0x8c, 0x55, 0xed, 0x76, 0xca, 0xcb, 0xcf, 0x87, 0xe8, + 0x4f, 0x3b, 0xe5, 0xa5, 0x21, 0x72, 0x32, 0x14, 0x0c, 0x3f, 0x44, 0xf3, 0xba, 0x42, 0x9b, 0x34, + 0xa0, 0xb6, 0x2b, 0xda, 0xc5, 0xbc, 0xca, 0x70, 0xa9, 0xdb, 0x29, 0xcf, 0xd7, 0xd2, 0x2a, 0x92, + 0xb5, 0xc5, 0x8f, 0xd1, 0xdc, 0x21, 0xff, 0x4f, 0xc8, 0x5a, 0xc1, 0x3e, 0x6b, 0xba, 0x76, 0xbb, + 0x38, 0xb1, 0x6a, 0xac, 0xcd, 0x58, 0x95, 0x6e, 0xa7, 0x3c, 0xf7, 0xef, 0x5a, 0x42, 0x71, 0x9a, + 0x15, 0x90, 0xb4, 0x23, 0x06, 0x34, 0x27, 0xd8, 0x31, 0xf8, 0xb2, 0x74, 0xc0, 0x05, 0x2f, 0x4e, + 0xaa, 0x5e, 0xae, 0x7d, 0xa8, 0x97, 0x07, 0x09, 0x07, 0xeb, 0xaa, 0x6e, 0xe7, 0x5c, 0x52, 0xca, + 0x49, 0x1a, 0x15, 0x6f, 0xa2, 0xc5, 0x30, 0x6a, 0x0e, 0x27, 0x10, 0xb4, 0xea, 0x4d, 0x97, 0x1f, + 0x15, 0xa7, 0xd4, 0x8d, 0xaf, 0x76, 0x3b, 0xe5, 0x45, 0x92, 0x55, 0x92, 0x41, 0xfb, 0xca, 0xb7, + 0x06, 0x9a, 0xda, 0xac, 0x6d, 0xef, 0x31, 0x07, 0x2e, 0xe1, 0x69, 0x6e, 0xa7, 0x9e, 0xe6, 0xb5, + 0x33, 0x86, 0x5b, 0x26, 0x35, 0xf2, 0x61, 0xfe, 0x16, 0x3d, 0x4c, 0x69, 0xa3, 0x99, 0x65, 0x15, + 0xe5, 0x7d, 0xea, 0x81, 0x4a, 0x7d, 0x26, 
0xf6, 0xd9, 0xa3, 0x1e, 0x10, 0xa5, 0xc1, 0xff, 0x44, + 0x93, 0x3e, 0x73, 0x60, 0xfb, 0x91, 0x4a, 0x60, 0xc6, 0xba, 0xa2, 0x6d, 0x26, 0xf7, 0x94, 0x94, + 0x68, 0x2d, 0xbe, 0x8b, 0x66, 0x05, 0x0b, 0x58, 0x93, 0x35, 0xda, 0x4f, 0xa1, 0xdd, 0x1b, 0xd3, + 0x85, 0x6e, 0xa7, 0x3c, 0x7b, 0x90, 0x90, 0x93, 0x94, 0x15, 0xae, 0xa3, 0x02, 0x6d, 0x36, 0x99, + 0x4d, 0x05, 0xad, 0x37, 0x41, 0xcd, 0x5e, 0x61, 0xbd, 0xfa, 0xa1, 0x3b, 0x46, 0xb3, 0x2d, 0x83, + 0x13, 0xe0, 0xac, 0x15, 0xda, 0xc0, 0xad, 0xf9, 0x6e, 0xa7, 0x5c, 0xd8, 0x88, 0x71, 0x48, 0x12, + 0xb4, 0xf2, 0x8d, 0x81, 0x0a, 0xfa, 0xd6, 0x97, 0x40, 0x46, 0x8f, 0xd3, 0x64, 0xf4, 0xf7, 0x73, + 0xf4, 0x6b, 0x04, 0x15, 0xd9, 0xfd, 0xb4, 0x15, 0x0f, 0x1d, 0xa0, 0x29, 0x47, 0x35, 0x8d, 0x17, + 0x0d, 0x05, 0x7d, 0xfd, 0x1c, 0xd0, 0x9a, 0xeb, 0xe6, 0x75, 0x80, 0xa9, 0xe8, 0xcc, 0x49, 0x0f, + 0xaa, 0xf2, 0xc5, 0x24, 0x9a, 0xed, 0x3d, 0xf3, 0x26, 0xe5, 0xfc, 0x12, 0x06, 0xfa, 0x5f, 0xa8, + 0x10, 0x84, 0xec, 0xc4, 0xe5, 0x2e, 0xf3, 0x21, 0xd4, 0x63, 0xb5, 0xa4, 0x5d, 0x0a, 0xfb, 0xb1, + 0x8a, 0x24, 0xed, 0x70, 0x13, 0xa1, 0x80, 0x86, 0xd4, 0x03, 0x21, 0x4b, 0x90, 0x53, 0x25, 0xb8, + 0xff, 0xa1, 0x12, 0x24, 0xaf, 0x65, 0xee, 0xf7, 0x5d, 0xb7, 0x7c, 0x11, 0xb6, 0xe3, 0x14, 0x63, + 0x05, 0x49, 0xe0, 0xe3, 0x63, 0x34, 0x17, 0x82, 0xdd, 0xa4, 0xae, 0xa7, 0x99, 0x2d, 0xaf, 0xd2, + 0xdc, 0x92, 0x0c, 0x43, 0x92, 0x8a, 0xd3, 0x4e, 0xf9, 0xf6, 0xe0, 0xf7, 0x86, 0xb9, 0x0f, 0x21, + 0x77, 0xb9, 0x00, 0x5f, 0x44, 0x03, 0x9b, 0xf2, 0x21, 0x69, 0x6c, 0xf9, 0x76, 0x3c, 0xc9, 0xf9, + 0xcf, 0x02, 0xe1, 0x32, 0x9f, 0x17, 0x27, 0xe2, 0xb7, 0xb3, 0x9b, 0x90, 0x93, 0x94, 0x15, 0xde, + 0x41, 0xcb, 0x72, 0xcc, 0x3f, 0x8e, 0x02, 0x6c, 0xbd, 0x0e, 0xa8, 0x2f, 0x4b, 0x55, 0x9c, 0x54, + 0x74, 0x56, 0x94, 0x0b, 0x62, 0x63, 0x88, 0x9e, 0x0c, 0xf5, 0xc2, 0xff, 0x43, 0x8b, 0xd1, 0x86, + 0xb0, 0x5c, 0xdf, 0x71, 0xfd, 0x86, 0xdc, 0x0f, 0x8a, 0x19, 0x67, 0xac, 0x1b, 0x92, 0x19, 0x9f, + 0x67, 0x95, 0xa7, 0xc3, 0x84, 0x64, 0x10, 0x04, 0xbf, 0x42, 0x8b, 0x2a, 0x22, 0x38, 0x9a, 0x08, + 0x5c, 0xe0, 0xc5, 0xe9, 0x41, 0x7a, 0x97, 0xa5, 0x93, 0x83, 0xd4, 0xa3, 0x8b, 0x1a, 0x34, 0xc1, + 0x16, 0x2c, 0x3c, 0x80, 0xd0, 0xb3, 0xfe, 0xa6, 0xfb, 0xb5, 0xb8, 0x91, 0x85, 0x22, 0x83, 0xe8, + 0x2b, 0x0f, 0xd1, 0x7c, 0xa6, 0xe1, 0x78, 0x01, 0xe5, 0x8e, 0xa1, 0x1d, 0x11, 0x1d, 0x91, 0x3f, + 0xf1, 0x32, 0x9a, 0x38, 0xa1, 0xcd, 0x16, 0x44, 0x13, 0x48, 0xa2, 0xc3, 0x83, 0xf1, 0xfb, 0x46, + 0xe5, 0x07, 0x03, 0x2d, 0x24, 0xa7, 0xe7, 0x12, 0x68, 0x63, 0x37, 0x4d, 0x1b, 0x6b, 0xe7, 0x1d, + 0xec, 0x11, 0xdc, 0xf1, 0xa9, 0x81, 0x66, 0x93, 0x8b, 0x10, 0xdf, 0x44, 0xd3, 0xb4, 0xe5, 0xb8, + 0xe0, 0xdb, 0x3d, 0xb2, 0xef, 0x67, 0xb3, 0xa1, 0xe5, 0xa4, 0x6f, 0x21, 0xd7, 0x24, 0xbc, 0x0e, + 0xdc, 0x90, 0xca, 0x49, 0xab, 0x81, 0xcd, 0x7c, 0x87, 0xab, 0x32, 0xe5, 0xa2, 0x35, 0xb9, 0x95, + 0x55, 0x92, 0x41, 0xfb, 0xca, 0xd7, 0xe3, 0x68, 0x21, 0x1a, 0x90, 0xe8, 0x2b, 0xc9, 0x03, 0x5f, + 0x5c, 0x02, 0xbd, 0x90, 0xd4, 0xbe, 0xbc, 0x7d, 0xf6, 0x2e, 0x89, 0xb3, 0x1b, 0xb5, 0x38, 0xf1, + 0x0b, 0x34, 0xc9, 0x05, 0x15, 0x2d, 0xc9, 0x3b, 0x12, 0x75, 0xfd, 0x42, 0xa8, 0xca, 0x33, 0x5e, + 0x9c, 0xd1, 0x99, 0x68, 0xc4, 0xca, 0x8f, 0x06, 0x5a, 0xce, 0xba, 0x5c, 0xc2, 0xc0, 0xfd, 0x37, + 0x3d, 0x70, 0x37, 0x2f, 0x72, 0xa3, 0x11, 0x43, 0xf7, 0xb3, 0x81, 0xfe, 0x32, 0x70, 0x79, 0xb5, + 0xa2, 0x25, 0x57, 0x05, 0x19, 0x46, 0xdc, 0x8b, 0xbf, 0x3b, 0x14, 0x57, 0xed, 0x0f, 0xd1, 0x93, + 0xa1, 0x5e, 0xf8, 0x25, 0x5a, 0x70, 0xfd, 0xa6, 0xeb, 0x43, 0x24, 0xab, 0xc5, 0xed, 0x1e, 0x4a, + 0x28, 0x59, 0x64, 0xd5, 0xe6, 0xe5, 0x6e, 0xa7, 0xbc, 0xb0, 0x9d, 
0x41, 0x21, 0x03, 0xb8, 0x95, + 0x9f, 0x86, 0xb4, 0x47, 0xed, 0x63, 0xf9, 0xa2, 0x94, 0x04, 0xc2, 0x81, 0x17, 0xa5, 0xe5, 0xa4, + 0x6f, 0xa1, 0x26, 0x48, 0x95, 0x42, 0x27, 0x7a, 0xb1, 0x09, 0x52, 0x9e, 0x89, 0x09, 0x52, 0x67, + 0xa2, 0x11, 0x65, 0x26, 0xf2, 0x23, 0x4c, 0x15, 0x34, 0x97, 0xce, 0x64, 0x4f, 0xcb, 0x49, 0xdf, + 0xa2, 0xf2, 0x7b, 0x6e, 0x48, 0x97, 0xd4, 0x28, 0x26, 0xae, 0xd4, 0xfb, 0x93, 0x93, 0xbd, 0x92, + 0xd3, 0xbf, 0x92, 0x83, 0xbf, 0x34, 0x10, 0xa6, 0x7d, 0x88, 0xdd, 0xde, 0xa8, 0x46, 0xf3, 0xf4, + 0xe4, 0xe2, 0x2f, 0xc4, 0xdc, 0x18, 0x00, 0x8b, 0x76, 0xf5, 0x8a, 0x4e, 0x02, 0x0f, 0x1a, 0x90, + 0x21, 0x19, 0x60, 0x17, 0x15, 0x22, 0xe9, 0x56, 0x18, 0xb2, 0x50, 0x3f, 0xd9, 0x6b, 0x67, 0x27, + 0xa4, 0xcc, 0xad, 0x92, 0xfa, 0x98, 0x8c, 0xfd, 0x4f, 0x3b, 0xe5, 0x42, 0x42, 0x4f, 0x92, 0xd8, + 0x32, 0x94, 0x03, 0x71, 0xa8, 0xfc, 0x9f, 0x08, 0xf5, 0x08, 0x46, 0x87, 0x4a, 0x60, 0xaf, 0x6c, + 0xa1, 0xbf, 0x8e, 0x28, 0xd0, 0x85, 0x76, 0xdb, 0x67, 0x06, 0x4a, 0xc6, 0xc0, 0x3b, 0x28, 0x2f, + 0x5c, 0xfd, 0x12, 0x0b, 0xeb, 0x37, 0xce, 0xc7, 0x30, 0x07, 0xae, 0x07, 0x31, 0x51, 0xca, 0x13, + 0x51, 0x28, 0xf8, 0x3a, 0x9a, 0xf2, 0x80, 0x73, 0xda, 0xd0, 0x91, 0xe3, 0x2f, 0xcf, 0xdd, 0x48, + 0x4c, 0x7a, 0xfa, 0xca, 0x3d, 0xb4, 0x34, 0xe4, 0x5b, 0x1e, 0x97, 0xd1, 0x84, 0xad, 0xfe, 0x29, + 0xcb, 0x84, 0x26, 0xac, 0x19, 0xc9, 0x32, 0x9b, 0xea, 0x0f, 0x72, 0x24, 0xb7, 0x6e, 0xbd, 0x79, + 0x5f, 0x1a, 0x7b, 0xfb, 0xbe, 0x34, 0xf6, 0xee, 0x7d, 0x69, 0xec, 0x93, 0x6e, 0xc9, 0x78, 0xd3, + 0x2d, 0x19, 0x6f, 0xbb, 0x25, 0xe3, 0x5d, 0xb7, 0x64, 0xfc, 0xd2, 0x2d, 0x19, 0x9f, 0xff, 0x5a, + 0x1a, 0x7b, 0x31, 0xa5, 0xeb, 0xfd, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xb1, 0x9d, 0x65, + 0x9d, 0x12, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -721,6 +757,30 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RequiresRepublish != nil { + i-- + if *m.RequiresRepublish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TokenRequests) > 0 { + for iNdEx := len(m.TokenRequests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.FSGroupPolicy != nil { i -= len(*m.FSGroupPolicy) copy(dAtA[i:], *m.FSGroupPolicy) @@ -1107,6 +1167,39 @@ func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1503,6 +1596,15 @@ func (m *CSIDriverSpec) Size() (n int) { l = len(*m.FSGroupPolicy) n += 1 + l + 
sovGenerated(uint64(l)) } + if len(m.TokenRequests) > 0 { + for _, e := range m.TokenRequests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.RequiresRepublish != nil { + n += 2 + } return n } @@ -1635,6 +1737,20 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + func (m *VolumeAttachment) Size() (n int) { if m == nil { return 0 @@ -1787,12 +1903,19 @@ func (this *CSIDriverSpec) String() string { if this == nil { return "nil" } + repeatedStringForTokenRequests := "[]TokenRequest{" + for _, f := range this.TokenRequests { + repeatedStringForTokenRequests += strings.Replace(strings.Replace(f.String(), "TokenRequest", "TokenRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForTokenRequests += "}" s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `StorageCapacity:` + valueToStringGenerated(this.StorageCapacity) + `,`, `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, + `TokenRequests:` + repeatedStringForTokenRequests + `,`, + `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, `}`, }, "") return s @@ -1900,6 +2023,17 @@ func (this *StorageClassList) String() string { }, "") return s } +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *VolumeAttachment) String() string { if this == nil { return "nil" @@ -2104,10 +2238,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2224,10 +2355,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2399,16 +2527,68 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { s := FSGroupPolicy(dAtA[iNdEx:postIndex]) m.FSGroupPolicy = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenRequests = append(m.TokenRequests, TokenRequest{}) + if err := m.TokenRequests[len(m.TokenRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: 
+ if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresRepublish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RequiresRepublish = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2524,10 +2704,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2709,10 +2886,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2829,10 +3003,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2916,10 +3087,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3144,7 +3312,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3314,10 +3482,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3434,10 +3599,109 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3586,10 +3850,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3706,10 +3967,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3828,10 +4086,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3978,10 +4233,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4161,7 +4413,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -4250,10 +4502,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4368,10 +4617,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4441,10 +4687,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + 
skippy) > l { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index e61876ed56..1eb4e1f88c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1beta1; @@ -147,6 +147,43 @@ message CSIDriverSpec { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional optional string fsGroupPolicy = 5; + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + repeated TokenRequest tokenRequests = 6; + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + optional bool requiresRepublish = 7; } // DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. @@ -283,6 +320,19 @@ message StorageClassList { repeated StorageClass items = 2; } +// TokenRequest contains parameters of a service account token. +message TokenRequest { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + optional string audience = 1; + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" + // + // +optional + optional int64 expirationSeconds = 2; +} + // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. // diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 7946663a3f..10df2baa7a 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -364,6 +364,43 @@ type CSIDriverSpec struct { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
+ // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -395,6 +432,20 @@ const ( // provided by a CSI driver. More modes may be added in the future. type VolumeLifecycleMode string +// TokenRequest contains parameters of a service account token. +type TokenRequest struct { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + // + Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" + // + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` +} + const ( // VolumeLifecyclePersistent explicitly confirms that the driver implements // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 60cc4c6a45..c51950d7c8 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -54,6 +54,8 @@ var map_CSIDriverSpec = map[string]string{ "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false.", "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -127,6 +129,16 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_TokenRequest = map[string]string{ + "": "TokenRequest contains parameters of a service account token.", + "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index a1538c1319..89a102901c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -113,6 +113,18 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(FSGroupPolicy) **out = **in } + if in.TokenRequests != nil { + in, out := &in.TokenRequests, &out.TokenRequests + *out = make([]TokenRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequiresRepublish != nil { + in, out := &in.RequiresRepublish, &out.RequiresRepublish + *out = new(bool) + **out = **in + } return } @@ -328,6 +340,27 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go index d6f0c35fe2..f3388ffe85 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -3863,10 +3863,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4015,10 +4012,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4140,10 +4134,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4372,10 +4363,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4493,10 +4481,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4645,10 +4630,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4859,10 +4841,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4979,10 +4958,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5224,10 +5200,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5464,10 +5437,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5616,10 +5586,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5900,10 +5867,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6050,10 +6014,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6103,10 +6064,7 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6228,10 +6186,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6317,10 +6272,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6434,10 +6386,7 @@ func (m *ExternalDocumentation) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6521,10 +6470,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7435,7 +7381,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7600,7 +7546,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7729,7 +7675,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7894,7 +7840,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -8162,10 +8108,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8285,10 +8228,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8394,10 +8334,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8515,10 +8452,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8685,10 +8619,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8841,10 +8772,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8962,10 +8890,7 @@ func (m *WebhookConversion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index 5a083cb4b4..3494a31ba0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1; diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 0382b04458..2c53cd916a 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -3850,10 +3850,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4002,10 +3999,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4127,10 +4121,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4359,10 +4350,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4512,10 +4500,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4664,10 +4649,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4878,10 +4860,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4998,10 +4977,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5243,10 +5219,7 @@ func (m 
*CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5622,10 +5595,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5774,10 +5744,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6058,10 +6025,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6208,10 +6172,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6261,10 +6222,7 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6386,10 +6344,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6475,10 +6430,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6592,10 +6544,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6679,10 +6628,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7593,7 +7539,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7758,7 +7704,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7887,7 +7833,7 @@ func (m 
*JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -8052,7 +7998,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -8320,10 +8266,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8443,10 +8386,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8552,10 +8492,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8673,10 +8610,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8843,10 +8777,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8999,10 +8930,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 8a1f7b957c..40635aff37 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1; diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 934790dcb1..343a6f550e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -51,6 +51,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond existingCondition.Reason = newCondition.Reason existingCondition.Message = newCondition.Message + existingCondition.ObservedGeneration = newCondition.ObservedGeneration } // RemoveStatusCondition removes the corresponding conditionType from conditions. 
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 9ca34c9fa9..6a4116a040 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -40,8 +40,6 @@ func CommonAccessor(obj interface{}) (metav1.Common, error) { switch t := obj.(type) { case List: return t, nil - case metav1.ListInterface: - return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { return m, nil @@ -72,8 +70,6 @@ func ListAccessor(obj interface{}) (List, error) { switch t := obj.(type) { case List: return t, nil - case metav1.ListInterface: - return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { return m, nil diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 41b60d7319..00bd86f51a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -65,6 +65,9 @@ type DefaultRESTMapper struct { } func (m *DefaultRESTMapper) String() string { + if m == nil { + return "" + } return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 18a6c7cd68..472104d542 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.api.resource; diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index d95e03aa92..8d718945d0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -20,6 +20,7 @@ import ( "bytes" "errors" "fmt" + "math" "math/big" "strconv" "strings" @@ -120,7 +121,7 @@ const ( ) // MustParse turns the given string into a quantity or panics; for tests -// or others cases where you know the string is valid. +// or other cases where you know the string is valid. func MustParse(str string) Quantity { q, err := ParseQuantity(str) if err != nil { @@ -442,6 +443,36 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { } } +// AsApproximateFloat64 returns a float64 representation of the quantity which may +// lose precision. If the value of the quantity is outside the range of a float64 +// +Inf/-Inf will be returned. +func (q *Quantity) AsApproximateFloat64() float64 { + var base float64 + var exponent int + if q.d.Dec != nil { + base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64() + exponent = int(-q.d.Dec.Scale()) + } else { + base = float64(q.i.value) + exponent = int(q.i.scale) + } + if exponent == 0 { + return base + } + + // multiply by the appropriate exponential scale + switch q.Format { + case DecimalExponent, DecimalSI: + return base * math.Pow10(exponent) + default: + // fast path for exponents that can fit in 64 bits + if exponent > 0 && exponent < 7 { + return base * float64(int64(1)<<(exponent*10)) + } + return base * math.Pow(2, float64(exponent*10)) + } +} + // AsInt64 returns a representation of the current value as an int64 if a fast conversion // is possible. 
If false is returned, callers must use the inf.Dec form of this quantity. func (q *Quantity) AsInt64() (int64, bool) { @@ -598,6 +629,9 @@ const int64QuantityExpectedBytes = 18 // String is an expensive operation and caching this result significantly reduces the cost of // normal parse / marshal operations on Quantity. func (q *Quantity) String() string { + if q == nil { + return "" + } if len(q.s) == 0 { result := make([]byte, 0, int64QuantityExpectedBytes) number, suffix := q.CanonicalizeBytes(result) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index f89ca163cd..3e0cdb10d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -166,7 +166,7 @@ func (m *Quantity) Unmarshal(data []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 15b4c875a3..40018601c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -26,6 +26,5 @@ reviewers: - mml - mbohlool - therc -- mqliang - kevin-wangzefeng - jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index e74a51099d..e41a3901fc 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -4925,10 +4925,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5012,10 +5009,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5377,10 +5371,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5496,10 +5487,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5615,10 +5603,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5848,10 +5833,7 @@ func (m *Condition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5965,10 +5947,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if err != nil 
{ return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6160,10 +6139,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6232,10 +6208,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6325,10 +6298,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6412,10 +6382,7 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6497,10 +6464,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6614,10 +6578,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6731,10 +6692,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6848,10 +6806,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6965,10 +6920,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7114,10 +7066,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7263,10 +7212,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7426,7 +7372,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if err != nil { return err 
} - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7477,10 +7423,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7626,10 +7569,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7746,10 +7686,7 @@ func (m *List) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7915,10 +7852,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8207,10 +8141,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8460,10 +8391,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8923,7 +8851,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -9050,7 +8978,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -9199,10 +9127,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9422,10 +9347,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9508,10 +9430,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9628,10 +9547,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + 
skippy) > l { @@ -9681,10 +9597,7 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9819,10 +9732,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9938,10 +9848,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10023,10 +9930,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10140,10 +10044,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10377,10 +10278,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10526,10 +10424,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10760,10 +10655,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10845,10 +10737,7 @@ func (m *TableOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10936,10 +10825,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11053,10 +10939,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11170,10 +11053,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ 
-11255,10 +11135,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11373,10 +11250,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index b72d43ff00..fd24483c0e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.apis.meta.v1; @@ -375,6 +375,7 @@ message GroupVersionResource { // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. +// +structType=atomic message LabelSelector { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go index bd4c6d9b58..54a0944af1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -34,6 +34,9 @@ type GroupResource struct { } func (gr *GroupResource) String() string { + if gr == nil { + return "" + } if len(gr.Group) == 0 { return gr.Resource } @@ -51,6 +54,9 @@ type GroupVersionResource struct { } func (gvr *GroupVersionResource) String() string { + if gvr == nil { + return "" + } return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -64,6 +70,9 @@ type GroupKind struct { } func (gk *GroupKind) String() string { + if gk == nil { + return "" + } if len(gk.Group) == 0 { return gk.Kind } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index ad989ad75c..3c5a1518c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -201,6 +201,20 @@ func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { obj.Annotations[ann] = value } +// HasLabel returns a bool if passed in label exists +func HasLabel(obj ObjectMeta, label string) bool { + _, found := obj.Labels[label] + return found +} + +// SetMetaDataLabel sets the label and value +func SetMetaDataLabel(obj *ObjectMeta, label string, value string) { + if obj.Labels == nil { + obj.Labels = make(map[string]string) + } + obj.Labels[label] = value +} + // SingleObject returns a ListOptions for watching a single object. 
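The helpers.go hunk above vendors two new metav1 helpers, HasLabel and SetMetaDataLabel, with exactly the signatures shown in the patch. A minimal usage sketch (illustrative only, not part of the vendored patch):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	om := metav1.ObjectMeta{Name: "example"}
	// SetMetaDataLabel lazily initializes the Labels map before writing the key.
	metav1.SetMetaDataLabel(&om, "app.kubernetes.io/part-of", "tekton-pipelines")
	// HasLabel reports whether the key exists, regardless of its value.
	fmt.Println(metav1.HasLabel(om, "app.kubernetes.io/part-of")) // true
	fmt.Println(metav1.HasLabel(om, "missing"))                   // false
}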
func SingleObject(meta ObjectMeta) ListOptions { return ListOptions{ diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index cdd9a6a7a0..8eb37f4367 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,8 +19,6 @@ package v1 import ( "encoding/json" "time" - - "github.com/google/gofuzz" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -181,16 +179,3 @@ func (t MicroTime) MarshalQueryParameter() (string, error) { return t.UTC().Format(RFC3339Micro), nil } - -// Fuzz satisfies fuzz.Interface. -func (t *MicroTime) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Accurate to a tenth of - // micro second. Leave off nanoseconds because JSON doesn't - // represent them so they can't round-trip properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) -} - -var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go new file mode 100644 index 0000000000..befab16f72 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -0,0 +1,39 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) +} + +// ensure MicroTime implements fuzz.Interface +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 4a1d89cfce..421770d432 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,8 +19,6 @@ package v1 import ( "encoding/json" "time" - - fuzz "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -182,16 +180,3 @@ func (t Time) MarshalQueryParameter() (string, error) { return t.UTC().Format(time.RFC3339), nil } - -// Fuzz satisfies fuzz.Interface. -func (t *Time) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Leave off nanoseconds - // because JSON doesn't represent them so they can't round-trip - // properly. 
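The hunks above move the MicroTime Fuzz implementation into micro_time_fuzz.go behind the "+build !notest" tag, so building with -tags notest drops the github.com/google/gofuzz dependency entirely. A rough sketch of the default (untagged) behavior, assuming gofuzz is available on the module path:

package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var t metav1.MicroTime
	// In the default build MicroTime satisfies fuzz.Interface, so gofuzz calls
	// the custom Fuzz method instead of filling the struct reflectively.
	fuzz.New().Fuzz(&t)
	fmt.Println(t.UTC().Format(metav1.RFC3339Micro))
}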
- t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) -} - -var _ fuzz.Interface = &Time{} diff --git a/vendor/knative.dev/pkg/metrics/nostackdriver.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go similarity index 52% rename from vendor/knative.dev/pkg/metrics/nostackdriver.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go index b5c71eb77a..94ad8d7cf4 100644 --- a/vendor/knative.dev/pkg/metrics/nostackdriver.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -1,7 +1,7 @@ -// +build nostackdriver +// +build !notest /* -Copyright 2019 The Knative Authors +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,20 +16,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package metrics +package v1 import ( - "context" - "errors" + "time" - "go.opencensus.io/stats/view" - "go.uber.org/zap" + fuzz "github.com/google/gofuzz" ) -func sdinit(ctx context.Context, m map[string]string, mc *metricsConfig, ops ExporterOptions) error { - return errors.New("Stackdriver support is not included") +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) } -func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, ResourceExporterFactory, error) { - return nil, nil, errors.New("Stackdriver support is not included") -} +// ensure Time implements fuzz.Interface +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index bb57f2cc43..d84878d7c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1092,6 +1092,7 @@ type Patch struct{} // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. +// +structType=atomic type LabelSelector struct { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 54a231e493..7b101ea512 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -282,14 +282,6 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return 0 - } - return val -} - func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { val, found, err := NestedInt64(obj, fields...) 
if !found || err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index cd5fc9026c..a5a9496796 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -311,10 +311,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 59ce743766..a209dd4567 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.apis.meta.v1beta1; diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 838d5b0aac..7913484764 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -26,16 +26,6 @@ type typePair struct { dest reflect.Type } -type typeNamePair struct { - fieldType reflect.Type - fieldName string -} - -// DebugLogger allows you to get debugging messages if necessary. -type DebugLogger interface { - Logf(format string, args ...interface{}) -} - type NameFunc func(t reflect.Type) string var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } @@ -57,24 +47,6 @@ type Converter struct { ignoredConversions map[typePair]struct{} ignoredUntypedConversions map[typePair]struct{} - // This is a map from a source field type and name, to a list of destination - // field type and name. - structFieldDests map[typeNamePair][]typeNamePair - - // Allows for the opposite lookup of structFieldDests. So that SourceFromDest - // copy flag also works. So this is a map of destination field name, to potential - // source field name and type to look for. - structFieldSources map[typeNamePair][]typeNamePair - - // Map from an input type to a function which can apply a key name mapping - inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc - - // Map from an input type to a set of default conversion flags. - inputDefaultFlags map[reflect.Type]FieldMatchingFlags - - // If non-nil, will be called to print helpful debugging info. Quite verbose. - Debug DebugLogger - // nameFunc is called to retrieve the name of a type; this name is used for the // purpose of deciding whether two types match or not (i.e., will we attempt to // do a conversion). The default returns the go type name. 
@@ -89,11 +61,6 @@ func NewConverter(nameFn NameFunc) *Converter { ignoredConversions: make(map[typePair]struct{}), ignoredUntypedConversions: make(map[typePair]struct{}), nameFunc: nameFn, - structFieldDests: make(map[typeNamePair][]typeNamePair), - structFieldSources: make(map[typeNamePair][]typeNamePair), - - inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), - inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), } c.RegisterUntypedConversionFunc( (*[]byte)(nil), (*[]byte)(nil), @@ -112,11 +79,9 @@ func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { return &copied } -// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. -func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { - return c.inputDefaultFlags[t], &Meta{ - KeyNameMapping: c.inputFieldMappingFuncs[t], - } +// DefaultMeta returns meta for a given type. +func (c *Converter) DefaultMeta(t reflect.Type) *Meta { + return &Meta{} } // Convert_Slice_byte_To_Slice_byte prevents recursing into every byte @@ -136,24 +101,12 @@ func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { type Scope interface { // Call Convert to convert sub-objects. Note that if you call it with your own exact // parameters, you'll run out of stack space before anything useful happens. - Convert(src, dest interface{}, flags FieldMatchingFlags) error - - // SrcTags and DestTags contain the struct tags that src and dest had, respectively. - // If the enclosing object was not a struct, then these will contain no tags, of course. - SrcTag() reflect.StructTag - DestTag() reflect.StructTag - - // Flags returns the flags with which the conversion was started. - Flags() FieldMatchingFlags + Convert(src, dest interface{}) error // Meta returns any information originally passed to Convert. Meta() *Meta } -// FieldMappingFunc can convert an input field value into different values, depending on -// the value of the source or destination struct tags. -type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) - func NewConversionFuncs() ConversionFuncs { return ConversionFuncs{ untyped: make(map[typePair]ConversionFunc), @@ -194,9 +147,6 @@ func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { // Meta is supplied by Scheme, when it calls Convert. type Meta struct { - // KeyNameMapping is an optional function which may map the listed key (field name) - // into a source and destination value. - KeyNameMapping FieldMappingFunc // Context is an optional field that callers may use to pass info to conversion functions. Context interface{} } @@ -205,84 +155,11 @@ type Meta struct { type scope struct { converter *Converter meta *Meta - flags FieldMatchingFlags - - // srcStack & destStack are separate because they may not have a 1:1 - // relationship. - srcStack scopeStack - destStack scopeStack -} - -type scopeStackElem struct { - tag reflect.StructTag - value reflect.Value - key string -} - -type scopeStack []scopeStackElem - -func (s *scopeStack) pop() { - n := len(*s) - *s = (*s)[:n-1] -} - -func (s *scopeStack) push(e scopeStackElem) { - *s = append(*s, e) -} - -func (s *scopeStack) top() *scopeStackElem { - return &(*s)[len(*s)-1] -} - -func (s scopeStack) describe() string { - desc := "" - if len(s) > 1 { - desc = "(" + s[1].value.Type().String() + ")" - } - for i, v := range s { - if i < 2 { - // First layer on stack is not real; second is handled specially above. 
- continue - } - if v.key == "" { - desc += fmt.Sprintf(".%v", v.value.Type()) - } else { - desc += fmt.Sprintf(".%v", v.key) - } - } - return desc -} - -// Formats src & dest as indices for printing. -func (s *scope) setIndices(src, dest int) { - s.srcStack.top().key = fmt.Sprintf("[%v]", src) - s.destStack.top().key = fmt.Sprintf("[%v]", dest) -} - -// Formats src & dest as map keys for printing. -func (s *scope) setKeys(src, dest interface{}) { - s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src) - s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest) } // Convert continues a conversion. -func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { - return s.converter.Convert(src, dest, flags, s.meta) -} - -// SrcTag returns the tag of the struct containing the current source item, if any. -func (s *scope) SrcTag() reflect.StructTag { - return s.srcStack.top().tag -} - -// DestTag returns the tag of the struct containing the current dest item, if any. -func (s *scope) DestTag() reflect.StructTag { - return s.destStack.top().tag -} - -// Flags returns the flags with which the current conversion was started. -func (s *scope) Flags() FieldMatchingFlags { - return s.flags +func (s *scope) Convert(src, dest interface{}) error { + return s.converter.Convert(src, dest, s.meta) } // Meta returns the meta object that was originally passed to Convert. @@ -290,50 +167,6 @@ func (s *scope) Meta() *Meta { return s.meta } -// describe prints the path to get to the current (source, dest) values. -func (s *scope) describe() (src, dest string) { - return s.srcStack.describe(), s.destStack.describe() -} - -// error makes an error that includes information about where we were in the objects -// we were asked to convert. -func (s *scope) errorf(message string, args ...interface{}) error { - srcPath, destPath := s.describe() - where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath) - return fmt.Errorf(where+message, args...) -} - -// Verifies whether a conversion function has a correct signature. -func verifyConversionFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got: %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got: %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) - } - scopeType := Scope(nil) - if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil. - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. @@ -364,71 +197,16 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { return nil } -// RegisterInputDefaults registers a field name mapping function, used when converting -// from maps to structs. 
Inputs to the conversion methods are checked for this type and a mapping -// applied automatically if the input matches in. A set of default flags for the input conversion -// may also be provided, which will be used when no explicit flags are requested. -func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error { - fv := reflect.ValueOf(in) - ft := fv.Type() - if ft.Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer 'in' argument, got: %v", ft) - } - c.inputFieldMappingFuncs[ft] = fn - c.inputDefaultFlags[ft] = defaultFlags - return nil -} - -// FieldMatchingFlags contains a list of ways in which struct fields could be -// copied. These constants may be | combined. -type FieldMatchingFlags int - -const ( - // Loop through destination fields, search for matching source - // field to copy it from. Source fields with no corresponding - // destination field will be ignored. If SourceToDest is - // specified, this flag is ignored. If neither is specified, - // or no flags are passed, this flag is the default. - DestFromSource FieldMatchingFlags = 0 - // Loop through source fields, search for matching dest field - // to copy it into. Destination fields with no corresponding - // source field will be ignored. - SourceToDest FieldMatchingFlags = 1 << iota - // Don't treat it as an error if the corresponding source or - // dest field can't be found. - IgnoreMissingFields - // Don't require type names to match. - AllowDifferentFieldTypeNames -) - -// IsSet returns true if the given flag or combination of flags is set. -func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { - if flag == DestFromSource { - // The bit logic doesn't work on the default value. - return f&SourceToDest != SourceToDest - } - return f&flag == flag -} - // Convert will translate src to dest if it knows how. Both must be pointers. // If no conversion func is registered and the default copying mechanism // doesn't work on this type pair, an error will be returned. -// Read the comments on the various FieldMatchingFlags constants to understand -// what the 'flags' parameter does. // 'meta' is given to allow you to pass information to conversion functions, // it is not used by Convert() other than storing it in the scope. // Not safe for objects with cyclic references! -func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { - return c.doConversion(src, dest, flags, meta, c.convert) -} - -type conversionFunc func(sv, dv reflect.Value, scope *scope) error - -func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { +func (c *Converter) Convert(src, dest interface{}, meta *Meta) error { pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)} scope := &scope{ converter: c, - flags: flags, meta: meta, } @@ -452,366 +230,4 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags return err } return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type()) - - // TODO: Everything past this point is deprecated. - // Remove in 1.20 once we're sure it didn't break anything. - - // Leave something on the stack, so that calls to struct tag getters never fail. - scope.srcStack.push(scopeStackElem{}) - scope.destStack.push(scopeStackElem{}) - return f(sv, dv, scope) -} - -// callUntyped calls predefined conversion func. 
-func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error { - if !dv.CanAddr() { - return scope.errorf("cant addr dest") - } - var svPointer reflect.Value - if sv.CanAddr() { - svPointer = sv.Addr() - } else { - svPointer = reflect.New(sv.Type()) - svPointer.Elem().Set(sv) - } - dvPointer := dv.Addr() - return f(svPointer.Interface(), dvPointer.Interface(), scope) -} - -// convert recursively copies sv into dv, calling an appropriate conversion function if -// one is registered. -func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { - dt, st := dv.Type(), sv.Type() - pair := typePair{st, dt} - - // ignore conversions of this type - if _, ok := c.ignoredConversions[pair]; ok { - if c.Debug != nil { - c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt) - } - return nil - } - - // Convert sv to dv. - pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())} - if f, ok := c.conversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) - } - if f, ok := c.generatedConversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) - } - - if !dv.CanSet() { - return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)") - } - - if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) { - return scope.errorf( - "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.", - c.nameFunc(st), c.nameFunc(dt), st, dt) - } - - switch st.Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - // Don't copy these via assignment/conversion! - default: - // This should handle all simple types. - if st.AssignableTo(dt) { - dv.Set(sv) - return nil - } - if st.ConvertibleTo(dt) { - dv.Set(sv.Convert(dt)) - return nil - } - } - - if c.Debug != nil { - c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt) - } - - scope.srcStack.push(scopeStackElem{value: sv}) - scope.destStack.push(scopeStackElem{value: dv}) - defer scope.srcStack.pop() - defer scope.destStack.pop() - - switch dv.Kind() { - case reflect.Struct: - return c.convertKV(toKVValue(sv), toKVValue(dv), scope) - case reflect.Slice: - if sv.IsNil() { - // Don't make a zero-length slice. - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) - for i := 0; i < sv.Len(); i++ { - scope.setIndices(i, i) - if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil { - return err - } - } - case reflect.Ptr: - if sv.IsNil() { - // Don't copy a nil ptr! - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.New(dt.Elem())) - switch st.Kind() { - case reflect.Ptr, reflect.Interface: - return c.convert(sv.Elem(), dv.Elem(), scope) - default: - return c.convert(sv, dv.Elem(), scope) - } - case reflect.Map: - if sv.IsNil() { - // Don't copy a nil ptr! - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.MakeMap(dt)) - for _, sk := range sv.MapKeys() { - dk := reflect.New(dt.Key()).Elem() - if err := c.convert(sk, dk, scope); err != nil { - return err - } - dkv := reflect.New(dt.Elem()).Elem() - scope.setKeys(sk.Interface(), dk.Interface()) - // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false, - // because a map[string]struct{} does not allow a pointer reference. - // Calling a custom conversion function defined for the map value - // will panic. Example is PodInfo map[string]ContainerStatus. 
- if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil { - return err - } - dv.SetMapIndex(dk, dkv) - } - case reflect.Interface: - if sv.IsNil() { - // Don't copy a nil interface! - dv.Set(reflect.Zero(dt)) - return nil - } - tmpdv := reflect.New(sv.Elem().Type()).Elem() - if err := c.convert(sv.Elem(), tmpdv, scope); err != nil { - return err - } - dv.Set(reflect.ValueOf(tmpdv.Interface())) - return nil - default: - return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt) - } - return nil -} - -var stringType = reflect.TypeOf("") - -func toKVValue(v reflect.Value) kvValue { - switch v.Kind() { - case reflect.Struct: - return structAdaptor(v) - case reflect.Map: - if v.Type().Key().AssignableTo(stringType) { - return stringMapAdaptor(v) - } - } - - return nil -} - -// kvValue lets us write the same conversion logic to work with both maps -// and structs. Only maps with string keys make sense for this. -type kvValue interface { - // returns all keys, as a []string. - keys() []string - // Will just return "" for maps. - tagOf(key string) reflect.StructTag - // Will return the zero Value if the key doesn't exist. - value(key string) reflect.Value - // Maps require explicit setting-- will do nothing for structs. - // Returns false on failure. - confirmSet(key string, v reflect.Value) bool -} - -type stringMapAdaptor reflect.Value - -func (a stringMapAdaptor) len() int { - return reflect.Value(a).Len() -} - -func (a stringMapAdaptor) keys() []string { - v := reflect.Value(a) - keys := make([]string, v.Len()) - for i, v := range v.MapKeys() { - if v.IsNil() { - continue - } - switch t := v.Interface().(type) { - case string: - keys[i] = t - } - } - return keys -} - -func (a stringMapAdaptor) tagOf(key string) reflect.StructTag { - return "" -} - -func (a stringMapAdaptor) value(key string) reflect.Value { - return reflect.Value(a).MapIndex(reflect.ValueOf(key)) -} - -func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool { - return true -} - -type structAdaptor reflect.Value - -func (a structAdaptor) len() int { - v := reflect.Value(a) - return v.Type().NumField() -} - -func (a structAdaptor) keys() []string { - v := reflect.Value(a) - t := v.Type() - keys := make([]string, t.NumField()) - for i := range keys { - keys[i] = t.Field(i).Name - } - return keys -} - -func (a structAdaptor) tagOf(key string) reflect.StructTag { - v := reflect.Value(a) - field, ok := v.Type().FieldByName(key) - if ok { - return field.Tag - } - return "" -} - -func (a structAdaptor) value(key string) reflect.Value { - v := reflect.Value(a) - return v.FieldByName(key) -} - -func (a structAdaptor) confirmSet(key string, v reflect.Value) bool { - return true -} - -// convertKV can convert things that consist of key/value pairs, like structs -// and some maps. -func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error { - if skv == nil || dkv == nil { - // TODO: add keys to stack to support really understandable error messages. 
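The converter.go removals above strip out the reflection-based field copying, FieldMatchingFlags, and the debug logger, leaving only explicitly registered untyped conversion funcs; Converter.Convert and Scope.Convert lose their flags parameter accordingly. A minimal sketch of the surviving API, using hypothetical In/Out types (not from the patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
)

type In struct{ Msg string }
type Out struct{ Msg string }

func main() {
	c := conversion.NewConverter(conversion.DefaultNameFunc)
	// Conversions must now be registered explicitly; nothing is copied by reflection.
	_ = c.RegisterUntypedConversionFunc((*In)(nil), (*Out)(nil),
		func(a, b interface{}, s conversion.Scope) error {
			b.(*Out).Msg = a.(*In).Msg
			return nil
		})
	out := &Out{}
	// Convert now takes only (src, dest, meta); FieldMatchingFlags are gone.
	if err := c.Convert(&In{Msg: "hello"}, out, &conversion.Meta{}); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(out.Msg) // hello
}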
- return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv) - } - - lister := dkv - if scope.flags.IsSet(SourceToDest) { - lister = skv - } - - var mapping FieldMappingFunc - if scope.meta != nil && scope.meta.KeyNameMapping != nil { - mapping = scope.meta.KeyNameMapping - } - - for _, key := range lister.keys() { - if found, err := c.checkField(key, skv, dkv, scope); found { - if err != nil { - return err - } - continue - } - stag := skv.tagOf(key) - dtag := dkv.tagOf(key) - skey := key - dkey := key - if mapping != nil { - skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag) - } - - df := dkv.value(dkey) - sf := skv.value(skey) - if !df.IsValid() || !sf.IsValid() { - switch { - case scope.flags.IsSet(IgnoreMissingFields): - // No error. - case scope.flags.IsSet(SourceToDest): - return scope.errorf("%v not present in dest", dkey) - default: - return scope.errorf("%v not present in src", skey) - } - continue - } - scope.srcStack.top().key = skey - scope.srcStack.top().tag = stag - scope.destStack.top().key = dkey - scope.destStack.top().tag = dtag - if err := c.convert(sf, df, scope); err != nil { - return err - } - } - return nil -} - -// checkField returns true if the field name matches any of the struct -// field copying rules. The error should be ignored if it returns false. -func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) { - replacementMade := false - if scope.flags.IsSet(DestFromSource) { - df := dkv.value(fieldName) - if !df.IsValid() { - return false, nil - } - destKey := typeNamePair{df.Type(), fieldName} - // Check each of the potential source (type, name) pairs to see if they're - // present in sv. - for _, potentialSourceKey := range c.structFieldSources[destKey] { - sf := skv.value(potentialSourceKey.fieldName) - if !sf.IsValid() { - continue - } - if sf.Type() == potentialSourceKey.fieldType { - // Both the source's name and type matched, so copy. - scope.srcStack.top().key = potentialSourceKey.fieldName - scope.destStack.top().key = fieldName - if err := c.convert(sf, df, scope); err != nil { - return true, err - } - dkv.confirmSet(fieldName, df) - replacementMade = true - } - } - return replacementMade, nil - } - - sf := skv.value(fieldName) - if !sf.IsValid() { - return false, nil - } - srcKey := typeNamePair{sf.Type(), fieldName} - // Check each of the potential dest (type, name) pairs to see if they're - // present in dv. - for _, potentialDestKey := range c.structFieldDests[srcKey] { - df := dkv.value(potentialDestKey.fieldName) - if !df.IsValid() { - continue - } - if df.Type() == potentialDestKey.fieldType { - // Both the dest's name and type matched, so copy. - scope.srcStack.top().key = fieldName - scope.destStack.top().key = potentialDestKey.fieldName - if err := c.convert(sf, df, scope); err != nil { - return true, err - } - dkv.confirmSet(potentialDestKey.fieldName, df) - replacementMade = true - } - } - return replacementMade, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index d9eeb4f919..d6bbeeaca7 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -141,25 +141,6 @@ func Equals(labels1, labels2 Set) bool { return true } -// AreLabelsInWhiteList verifies if the provided label list -// is in the provided whitelist and returns true, otherwise false. 
-func AreLabelsInWhiteList(labels, whitelist Set) bool { - if len(whitelist) == 0 { - return true - } - - for k, v := range labels { - value, ok := whitelist[k] - if !ok { - return false - } - if value != v { - return false - } - } - return true -} - // ConvertSelectorToLabelsMap converts selector string to labels map // and validates keys and values func ConvertSelectorToLabelsMap(selector string) (Set, error) { diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index bf62f98a42..50ae4f7cef 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -263,11 +263,11 @@ func (r *Requirement) Values() sets.String { } // Empty returns true if the internalSelector doesn't restrict selection space -func (lsel internalSelector) Empty() bool { - if lsel == nil { +func (s internalSelector) Empty() bool { + if s == nil { return true } - return len(lsel) == 0 + return len(s) == 0 } // String returns a human-readable string that represents this @@ -330,51 +330,51 @@ func safeSort(in []string) []string { } // Add adds requirements to the selector. It copies the current selector returning a new one -func (lsel internalSelector) Add(reqs ...Requirement) Selector { - var sel internalSelector - for ix := range lsel { - sel = append(sel, lsel[ix]) +func (s internalSelector) Add(reqs ...Requirement) Selector { + var ret internalSelector + for ix := range s { + ret = append(ret, s[ix]) } for _, r := range reqs { - sel = append(sel, r) + ret = append(ret, r) } - sort.Sort(ByKey(sel)) - return sel + sort.Sort(ByKey(ret)) + return ret } // Matches for a internalSelector returns true if all // its Requirements match the input Labels. If any // Requirement does not match, false is returned. -func (lsel internalSelector) Matches(l Labels) bool { - for ix := range lsel { - if matches := lsel[ix].Matches(l); !matches { +func (s internalSelector) Matches(l Labels) bool { + for ix := range s { + if matches := s[ix].Matches(l); !matches { return false } } return true } -func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } +func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true } // String returns a comma-separated string of all // the internalSelector Requirements' human-readable strings. -func (lsel internalSelector) String() string { +func (s internalSelector) String() string { var reqs []string - for ix := range lsel { - reqs = append(reqs, lsel[ix].String()) + for ix := range s { + reqs = append(reqs, s[ix].String()) } return strings.Join(reqs, ",") } // RequiresExactMatch introspect whether a given selector requires a single specific field // to be set, and if so returns the value it requires. 
-func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { - for ix := range lsel { - if lsel[ix].key == label { - switch lsel[ix].operator { +func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range s { + if s[ix].key == label { + switch s[ix].operator { case selection.Equals, selection.DoubleEquals, selection.In: - if len(lsel[ix].strValues) == 1 { - return lsel[ix].strValues[0], true + if len(s[ix].strValues) == 1 { + return s[ix].strValues[0], true } } return "", false @@ -789,12 +789,12 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) { // parseExactValue parses the only value for exact match style func (p *Parser) parseExactValue() (sets.String, error) { s := sets.NewString() - tok, lit := p.lookahead(Values) + tok, _ := p.lookahead(Values) if tok == EndOfStringToken || tok == CommaToken { s.Insert("") return s, nil } - tok, lit = p.consume(Values) + tok, lit := p.consume(Values) if tok == IdentifierToken { s.Insert(lit) return s, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 871e4c8c46..4a6cc68574 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -186,6 +186,9 @@ func fromUnstructured(sv, dv reflect.Value) error { reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: dv.Set(sv.Convert(dt)) return nil + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil } case reflect.Float32, reflect.Float64: switch dt.Kind() { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 0719718173..ac428d6103 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -450,10 +450,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -567,10 +564,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -751,10 +745,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 0e212ec941..3b25391fa3 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
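The runtime/converter.go hunk above adds an integer-to-float case to fromUnstructured, so unstructured whole-number values can populate float fields. A quick sketch of the effect through the public DefaultUnstructuredConverter (illustrative; the Ratio type is made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// Ratio is a throwaway example type with a float field.
type Ratio struct {
	Value float64 `json:"value"`
}

func main() {
	// Whole numbers in unstructured content frequently arrive as int64.
	u := map[string]interface{}{"value": int64(3)}
	var r Ratio
	// With the added int->float case this now succeeds instead of failing with
	// a "cannot convert" error when the destination field is a float.
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, &r); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(r.Value) // 3
}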
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index f44693c0c6..3e1fab1d11 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -57,7 +57,7 @@ type Encoder interface { // Identifiers of two different encoders should be equal if and only if for every input // object it will be encoded to the same representation by both of them. // - // Identifier is inteted for use with CacheableObject#CacheEncode method. In order to + // Identifier is intended for use with CacheableObject#CacheEncode method. In order to // correctly handle CacheableObject, Encode() method should look similar to below, where // doEncode() is the encoding logic of implemented encoder: // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go index 159b301206..3ab119b0a8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -92,39 +92,6 @@ func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion } } -// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver -// where objects are converted to gv prior to sending and decoded to their internal representation prior -// to retrieval. -// -// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. -func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { - decode := schema.GroupVersions{ - { - Group: gv.Group, - Version: APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: APIVersionInternal, - }, - } - return &clientNegotiator{ - encode: gv, - decode: decode, - serializer: serializer, - } -} - -// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used -// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. -func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { - return &clientNegotiator{ - serializer: &simpleNegotiatedSerializer{info: info}, - encode: gv, - } -} - type simpleNegotiatedSerializer struct { info SerializerInfo } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index 5aeeaa100a..c50766a4b7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime.schema; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go index b570668458..f04453fb01 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -23,8 +23,8 @@ type ObjectKind interface { // SetGroupVersionKind sets or clears the intended serialized kind of an object. 
Passing kind nil // should clear the current setting. SetGroupVersionKind(kind GroupVersionKind) - // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does - // not expose or provide these fields. + // GroupVersionKind returns the stored group, version, and kind of an object, or an empty struct + // if the object does not expose or provide these fields. GroupVersionKind() GroupVersionKind } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 3b254961d7..697dd4ed77 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -18,7 +18,6 @@ package runtime import ( "fmt" - "net/url" "reflect" "strings" @@ -105,9 +104,6 @@ func NewScheme() *Scheme { // Enable couple default conversions by default. utilruntime.Must(RegisterEmbeddedConversions(s)) utilruntime.Must(RegisterStringConversions(s)) - - utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) - utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) return s } @@ -309,11 +305,6 @@ func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { return nil, NewNotRegisteredErrForKind(s.schemeName, kind) } -// Log sets a logger on the scheme. For test purposes only -func (s *Scheme) Log(l conversion.DebugLogger) { - s.converter.Debug = l -} - // AddIgnoredConversionType identifies a pair of types that should be skipped by // conversion (because the data inside them is explicitly dropped during // conversion). @@ -342,14 +333,6 @@ func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conver return nil } -// RegisterInputDefaults sets the provided field mapping function and field matching -// as the defaults for the provided input type. The fn may be nil, in which case no -// mapping will happen by default. Use this method to register a mechanism for handling -// a specific input type in conversion, such as a map[string]string to structs. -func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { - return s.converter.RegisterInputDefaults(in, fn, defaultFlags) -} - // AddTypeDefaultingFunc registers a function that is passed a pointer to an // object and can default fields on the object. These functions will be invoked // when Default() is called. 
The function will never be called unless the @@ -433,12 +416,9 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { in = typed } - flags, meta := s.generateConvertMeta(in) + meta := s.generateConvertMeta(in) meta.Context = context - if flags == 0 { - flags = conversion.AllowDifferentFieldTypeNames - } - return s.converter.Convert(in, out, flags, meta) + return s.converter.Convert(in, out, meta) } // ConvertFieldLabel alters the given field label and value for an kind field selector from @@ -535,9 +515,9 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( in = in.DeepCopyObject() } - flags, meta := s.generateConvertMeta(in) + meta := s.generateConvertMeta(in) meta.Context = target - if err := s.converter.Convert(in, out, flags, meta); err != nil { + if err := s.converter.Convert(in, out, meta); err != nil { return nil, err } @@ -565,7 +545,7 @@ func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { } // generateConvertMeta constructs the meta value we pass to Convert. -func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { +func (s *Scheme) generateConvertMeta(in interface{}) *conversion.Meta { return s.converter.DefaultMeta(reflect.TypeOf(in)) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index f21b0ef19d..e55ab94d14 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -108,10 +108,9 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option // CodecFactory provides methods for retrieving codecs and serializers for specific // versions and content types. type CodecFactory struct { - scheme *runtime.Scheme - serializers []serializerType - universal runtime.Decoder - accepts []runtime.SerializerInfo + scheme *runtime.Scheme + universal runtime.Decoder + accepts []runtime.SerializerInfo legacySerializer runtime.Serializer } @@ -216,9 +215,8 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } return CodecFactory{ - scheme: scheme, - serializers: serializers, - universal: recognizer.NewDecoder(decoders...), + scheme: scheme, + universal: recognizer.NewDecoder(decoders...), accepts: accepts, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index e081d7ff19..83b2e13931 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -96,6 +96,7 @@ type SerializerOptions struct { Strict bool } +// Serializer handles encoding versioned objects into the proper JSON form type Serializer struct { meta MetaFactory options SerializerOptions @@ -144,10 +145,10 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { } } -// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be // case-sensitive when unmarshalling, and otherwise compatible with // the encoding/json standard library. 
-func CaseSensitiveJsonIterator() jsoniter.API { +func CaseSensitiveJSONIterator() jsoniter.API { config := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, @@ -159,10 +160,10 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be // case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with // the encoding/json standard library. -func StrictCaseSensitiveJsonIterator() jsoniter.API { +func StrictCaseSensitiveJSONIterator() jsoniter.API { config := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, @@ -179,8 +180,8 @@ func StrictCaseSensitiveJsonIterator() jsoniter.API { // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. // See https://github.com/json-iterator/go/issues/265 -var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() -var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() +var caseSensitiveJSONIterator = CaseSensitiveJSONIterator() +var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -236,7 +237,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil { + if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil { return nil, actual, err } return into, actual, nil @@ -260,7 +261,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { + if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } @@ -285,7 +286,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, // the actual error is that the object contains unknown field. strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil { return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) } // Always return the same object as the non-strict serializer to avoid any deviations. 
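For reference, a minimal usage sketch of the renamed helpers (not part of the vendored diff): callers that previously used CaseSensitiveJsonIterator/StrictCaseSensitiveJsonIterator must follow the rename to the JSON-cased names, and the strict iterator rejects unknown fields while the lenient one drops them. The widget type and payload below are illustrative only.

package main

import (
	"fmt"

	runtimejson "k8s.io/apimachinery/pkg/runtime/serializer/json"
)

// widget is a toy type used only to demonstrate lenient vs. strict decoding.
type widget struct {
	Name string `json:"name"`
}

func main() {
	data := []byte(`{"name":"a","extra":"ignored"}`)

	// Lenient, case-sensitive iterator: the unknown "extra" field is silently dropped.
	var w widget
	if err := runtimejson.CaseSensitiveJSONIterator().Unmarshal(data, &w); err != nil {
		fmt.Println("lenient decode failed:", err)
	}

	// Strict iterator: the same payload is rejected because of the unknown field,
	// which is what feeds the strictDecodingError path in Decode above.
	var ws widget
	if err := runtimejson.StrictCaseSensitiveJSONIterator().Unmarshal(data, &ws); err != nil {
		fmt.Println("strict decode rejected unknown field:", err)
	}
}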
@@ -302,7 +303,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { if s.options.Yaml { - json, err := caseSensitiveJsonIterator.Marshal(obj) + json, err := caseSensitiveJSONIterator.Marshal(obj) if err != nil { return err } @@ -315,7 +316,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { } if s.options.Pretty { - data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") + data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ") if err != nil { return err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index f606b7d728..404fb1b7e5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -61,6 +61,7 @@ func (e errNotMarshalable) Status() metav1.Status { } } +// IsNotMarshalable checks the type of error, returns a boolean true if error is not nil and not marshalable false otherwise func IsNotMarshalable(err error) bool { _, ok := err.(errNotMarshalable) return err != nil && ok @@ -77,6 +78,7 @@ func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Se } } +// Serializer handles encoding versioned objects into the proper wire form type Serializer struct { prefix []byte creater runtime.ObjectCreater @@ -457,8 +459,10 @@ func (s *RawSerializer) Identifier() runtime.Identifier { return rawSerializerIdentifier } +// LengthDelimitedFramer is exported variable of type lengthDelimitedFramer var LengthDelimitedFramer = lengthDelimitedFramer{} +// Provides length delimited frame reader and writer methods type lengthDelimitedFramer struct{} // NewFrameWriter implements stream framing for this serializer diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 88f0de36db..b19750f3a0 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -16,10 +16,6 @@ limitations under the License. package types -import ( - "fmt" -) - // NamespacedName comprises a resource name, with a mandatory namespace, // rendered as "/". Being a type captures intent and // helps make sure that UIDs, namespaced names and non-namespaced names @@ -39,5 +35,5 @@ const ( // String returns the general purpose string representation func (n NamespacedName) String() string { - return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) + return n.Namespace + string(Separator) + n.Name } diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 84b4f58849..faf2c26453 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -145,7 +145,7 @@ func (c *Expiring) gc(now time.Time) { // expired. // // heap[0] is a peek at the next element in the heap, which is not obvious - // from looking at the (*expiringHeap).Pop() implmentation below. + // from looking at the (*expiringHeap).Pop() implementation below. // heap.Pop() swaps the first entry with the last entry of the heap, then // calls (*expiringHeap).Pop() which returns the last element. 
if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 6cf13d83d1..3e1e2517b4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -348,7 +348,13 @@ func (f *fakeTimer) Stop() bool { // Reset conditionally updates the firing time of the timer. If the // timer has neither fired nor been stopped then this call resets the // timer to the fake clock's "now" + d and returns true, otherwise -// this call returns false. This is like time.Timer::Reset. +// it creates a new waiter, adds it to the clock, and returns true. +// +// It is not possible to return false, because a fake timer can be reset +// from any state (waiting to fire, already fired, and stopped). +// +// See the GoDoc for time.Timer::Reset for more context on why +// the return value of Reset() is not useful. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() @@ -360,7 +366,15 @@ func (f *fakeTimer) Reset(d time.Duration) bool { return true } } - return false + // No existing waiter, timer has already fired or been reset. + // We should still enable Reset() to succeed by creating a + // new waiter and adding it to the clock's waiters. + newWaiter := fakeClockWaiter{ + targetTime: f.fakeClock.time.Add(d), + destChan: f.waiter.destChan, + } + f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter) + return true } // Ticker defines the Ticker interface diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 5bafc218e2..1f5a04fd41 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -163,7 +163,7 @@ func matchesError(err error, fns ...Matcher) bool { // filterErrors returns any errors (or nested errors, if the list contains // nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all +// remain a nil list is returned. The resulting slice will have all // nested slices flattened as a side effect. func filterErrors(list []error, fns ...Matcher) []error { result := []error{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 066680f443..45aa74bf58 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -132,12 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // Return whatever remaining data exists from an in progress frame if n := len(r.remaining); n > 0 { if n <= len(data) { + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining...) r.remaining = nil return n, nil } n = len(data) + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining[:n]...) r.remaining = r.remaining[n:] return n, io.ErrShortBuffer @@ -155,6 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. if len(m) > n { + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], m[:n]...)
r.remaining = m[n:] return n, io.ErrShortBuffer diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index ec1cb70f29..a4792034ab 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -268,10 +268,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index e79fb9e572..a76f79851f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.util.intstr; diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go new file mode 100644 index 0000000000..2501d55166 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -0,0 +1,42 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package intstr + +import ( + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface +func (intstr *IntOrString) Fuzz(c fuzz.Continue) { + if intstr == nil { + return + } + if c.RandBool() { + intstr.Type = Int + c.Fuzz(&intstr.IntVal) + intstr.StrVal = "" + } else { + intstr.Type = String + intstr.IntVal = 0 + c.Fuzz(&intstr.StrVal) + } +} + +// ensure IntOrString implements fuzz.Interface +var _ fuzz.Interface = &IntOrString{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 6576def82e..c0e8927fe4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,7 +25,6 @@ import ( "strconv" "strings" - "github.com/google/gofuzz" "k8s.io/klog/v2" ) @@ -90,6 +89,9 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { + if intstr == nil { + return "" + } if intstr.Type == String { return intstr.StrVal } @@ -129,21 +131,6 @@ func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // the OpenAPI spec of this type. 
func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } -func (intstr *IntOrString) Fuzz(c fuzz.Continue) { - if intstr == nil { - return - } - if c.RandBool() { - intstr.Type = Int - c.Fuzz(&intstr.IntVal) - intstr.StrVal = "" - } else { - intstr.Type = String - intstr.IntVal = 0 - c.Fuzz(&intstr.StrVal) - } -} - func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { if intOrPercent == nil { return &defaultValue @@ -151,6 +138,33 @@ func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrS return intOrPercent } +// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. +// This method returns a scaled value from an IntOrString type. If the IntOrString +// is a percentage string value it's treated as a percentage and scaled appropriately +// in accordance to the total, if it's an int value it's treated as a a simple value and +// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. +func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValueSafely(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// GetValueFromIntOrPercent was deprecated in favor of +// GetScaledValueFromIntOrPercent. This method was treating all int as a numeric value and all +// strings with or without a percent symbol as a percentage value. 
+// Deprecated func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { if intOrPercent == nil { return 0, errors.New("nil value for IntOrString") @@ -169,6 +183,8 @@ func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool return value, nil } +// getIntOrPercentValue is a legacy function and only meant to be called by GetValueFromIntOrPercent +// For a more correct implementation call getIntOrPercentSafely func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { switch intOrStr.Type { case Int: @@ -183,3 +199,25 @@ func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { } return 0, false, fmt.Errorf("invalid type: neither int nor percentage") } + +func getIntOrPercentValueSafely(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + isPercent := false + s := intOrStr.StrVal + if strings.HasSuffix(s, "%") { + isPercent = true + s = strings.TrimSuffix(intOrStr.StrVal, "%") + } else { + return 0, false, fmt.Errorf("invalid type: string is not a percentage") + } + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), isPercent, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 204834883f..778e58f704 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -39,7 +39,8 @@ func Marshal(v interface{}) ([]byte, error) { const maxDepth = 10000 // Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers +// are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { switch v := v.(type) { case *map[string]interface{}: @@ -52,7 +53,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v, 0) + return ConvertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -64,7 +65,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v, 0) + return ConvertSliceNumbers(*v, 0) case *interface{}: // Build a decoder from the given data @@ -76,29 +77,31 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertInterfaceNumbers(v, 0) + return ConvertInterfaceNumbers(v, 0) default: return json.Unmarshal(data, v) } } -func convertInterfaceNumbers(v *interface{}, depth int) error { +// ConvertInterfaceNumbers converts any json.Number values to int64 or float64. 
+// Values which are map[string]interface{} or []interface{} are recursively visited +func ConvertInterfaceNumbers(v *interface{}, depth int) error { var err error switch v2 := (*v).(type) { case json.Number: *v, err = convertNumber(v2) case map[string]interface{}: - err = convertMapNumbers(v2, depth+1) + err = ConvertMapNumbers(v2, depth+1) case []interface{}: - err = convertSliceNumbers(v2, depth+1) + err = ConvertSliceNumbers(v2, depth+1) } return err } -// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// ConvertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}, depth int) error { +func ConvertMapNumbers(m map[string]interface{}, depth int) error { if depth > maxDepth { return fmt.Errorf("exceeded max depth of %d", maxDepth) } @@ -109,9 +112,9 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = ConvertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = ConvertSliceNumbers(v, depth+1) } if err != nil { return err @@ -120,9 +123,9 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { return nil } -// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// ConvertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}, depth int) error { +func ConvertSliceNumbers(s []interface{}, depth int) error { if depth > maxDepth { return fmt.Errorf("exceeded max depth of %d", maxDepth) } @@ -133,9 +136,9 @@ func convertSliceNumbers(s []interface{}, depth int) error { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = ConvertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = ConvertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go index 7b6eca8932..42ecffcca0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -130,7 +130,7 @@ func (*PortRange) Type() string { } // ParsePortRange parses a string of the form "min-max", inclusive at both -// ends, and initializs a new PortRange from it. +// ends, and initializes a new PortRange from it. func ParsePortRange(value string) (*PortRange, error) { pr := &PortRange{} err := pr.Set(value) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index e8a9f609f4..035c52811c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -79,7 +79,7 @@ func logPanic(r interface{}) { } } -// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// ErrorHandlers is a list of functions which will be invoked when a nonreturnable // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. 
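For reference, a minimal sketch (not part of the vendored diff) of what the newly exported util/json helpers do: Unmarshal post-processes decoded values with ConvertMapNumbers and friends so that whole numbers come back as int64 rather than float64. The payload is illustrative.

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var v map[string]interface{}
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &v); err != nil {
		panic(err)
	}
	// "replicas" decodes to int64(3); "ratio" stays a float64.
	// The same conversion is now reachable directly via utiljson.ConvertMapNumbers(v, 0).
	fmt.Printf("%T %T\n", v["replicas"], v["ratio"])
}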
@@ -165,7 +165,7 @@ func RecoverFromPanic(err *error) { } } -// Must panics on non-nil errors. Useful to handling programmer level errors. +// Must panics on non-nil errors. Useful to handling programmer level errors. func Must(err error) { if err != nil { panic(err) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go index 2efc8eec76..f9be7ac339 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -67,6 +67,9 @@ func (p *Path) Key(key string) *Path { // String produces a string representation of the Path. func (p *Path) String() string { + if p == nil { + return "" + } // make a slice to iterate elems := []*Path{} for ; p != nil; p = p.parent { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 4752b29a96..c8b4199840 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -175,7 +175,7 @@ func IsValidLabelValue(value string) []string { } const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" // DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 @@ -196,7 +196,7 @@ func IsDNS1123Label(value string) []string { } const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" // DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 @@ -347,7 +347,7 @@ func IsValidPortName(port string) []string { // IsValidIP tests that the argument is a valid IP address. func IsValidIP(value string) []string { if net.ParseIP(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index d759d912be..3dea7fe7f9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -604,3 +604,32 @@ func poller(interval, timeout time.Duration) WaitFunc { return ch }) } + +// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never +// exceeds the deadline specified by the request context. 
+func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 492171faf4..7fe7064677 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,10 +26,41 @@ import ( "strings" "unicode" + jsonutil "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog/v2" "sigs.k8s.io/yaml" ) +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers +// are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + preserveIntFloat := func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + } + switch v := v.(type) { + case *map[string]interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertMapNumbers(*v, 0) + case *[]interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertSliceNumbers(*v, 0) + case *interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertInterfaceNumbers(v, 0) + default: + return yaml.Unmarshal(data, v) + } +} + // ToJSON converts a single YAML document into a JSON document // or returns an error. If the document appears to be JSON the // YAML decoding path is not used (so that error messages are diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index 0ac8dc4ef9..0aaf01adce 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -40,15 +40,12 @@ const incomingQueueLength = 25 // Broadcaster distributes event notifications among any number of watchers. Every event // is delivered to every watcher. type Broadcaster struct { - // TODO: see if this lock is needed now that new watchers go through - // the incoming channel. - lock sync.Mutex - watchers map[int64]*broadcasterWatcher nextWatcher int64 distributing sync.WaitGroup incoming chan Event + stopped chan struct{} // How large to make watcher's channel. watchQueueLength int @@ -68,6 +65,7 @@ func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *B m := &Broadcaster{ watchers: map[int64]*broadcasterWatcher{}, incoming: make(chan Event, incomingQueueLength), + stopped: make(chan struct{}), watchQueueLength: queueLength, fullChannelBehavior: fullChannelBehavior, } @@ -96,10 +94,15 @@ func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { // The purpose of this terrible hack is so that watchers added after an event // won't ever see that event, and will always see any event after they are // added. 
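For reference, a minimal sketch (not part of the vendored diff) of calling the wait.ExponentialBackoffWithContext helper added above: it retries a ConditionFunc with exponential backoff but returns as soon as the caller's context is done. The health-check URL is a hypothetical stand-in.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	backoff := wait.Backoff{Duration: 200 * time.Millisecond, Factor: 2.0, Steps: 5}
	err := wait.ExponentialBackoffWithContext(ctx, backoff, func() (bool, error) {
		resp, err := http.Get("http://localhost:8080/healthz") // hypothetical endpoint
		if err != nil {
			return false, nil // transient failure: retry on the next step
		}
		resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil
	})
	if err != nil {
		// Either wait.ErrWaitTimeout (steps exhausted) or the context's error.
		fmt.Println("gave up waiting:", err)
	}
}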
-func (b *Broadcaster) blockQueue(f func()) { +func (m *Broadcaster) blockQueue(f func()) { + select { + case <-m.stopped: + return + default: + } var wg sync.WaitGroup wg.Add(1) - b.incoming <- Event{ + m.incoming <- Event{ Type: internalRunFunctionMarker, Object: functionFakeRuntimeObject(func() { defer wg.Done() @@ -111,12 +114,11 @@ func (b *Broadcaster) blockQueue(f func()) { // Watch adds a new watcher to the list and returns an Interface for it. // Note: new watchers will only receive new events. They won't get an entire history -// of previous events. +// of previous events. It will block until the watcher is actually added to the +// broadcaster. func (m *Broadcaster) Watch() Interface { var w *broadcasterWatcher m.blockQueue(func() { - m.lock.Lock() - defer m.lock.Unlock() id := m.nextWatcher m.nextWatcher++ w = &broadcasterWatcher{ @@ -127,18 +129,22 @@ func (m *Broadcaster) Watch() Interface { } m.watchers[id] = w }) + if w == nil { + // The panic here is to be consistent with the previous interface behavior + // we are willing to re-evaluate in the future. + panic("broadcaster already stopped") + } return w } // WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends // queuedEvents down the new watch before beginning to send ordinary events from Broadcaster. // The returned watch will have a queue length that is at least large enough to accommodate -// all of the items in queuedEvents. +// all of the items in queuedEvents. It will block until the watcher is actually added to +// the broadcaster. func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { var w *broadcasterWatcher m.blockQueue(func() { - m.lock.Lock() - defer m.lock.Unlock() id := m.nextWatcher m.nextWatcher++ length := m.watchQueueLength @@ -156,26 +162,29 @@ func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { w.result <- e } }) + if w == nil { + // The panic here is to be consistent with the previous interface behavior + // we are willing to re-evaluate in the future. + panic("broadcaster already stopped") + } return w } // stopWatching stops the given watcher and removes it from the list. func (m *Broadcaster) stopWatching(id int64) { - m.lock.Lock() - defer m.lock.Unlock() - w, ok := m.watchers[id] - if !ok { - // No need to do anything, it's already been removed from the list. - return - } - delete(m.watchers, id) - close(w.result) + m.blockQueue(func() { + w, ok := m.watchers[id] + if !ok { + // No need to do anything, it's already been removed from the list. + return + } + delete(m.watchers, id) + close(w.result) + }) } // closeAll disconnects all watchers (presumably in response to a Shutdown call). func (m *Broadcaster) closeAll() { - m.lock.Lock() - defer m.lock.Unlock() for _, w := range m.watchers { close(w.result) } @@ -194,9 +203,12 @@ func (m *Broadcaster) Action(action EventType, obj runtime.Object) { // until all events have been distributed through the outbound channels. Note // that since they can be buffered, this means that the watchers might not // have received the data yet as it can remain sitting in the buffered -// channel. +// channel. It will block until the broadcaster stop request is actually executed func (m *Broadcaster) Shutdown() { - close(m.incoming) + m.blockQueue(func() { + close(m.stopped) + close(m.incoming) + }) m.distributing.Wait() } @@ -217,8 +229,6 @@ func (m *Broadcaster) loop() { // distribute sends event to all watchers. Blocking. 
func (m *Broadcaster) distribute(event Event) { - m.lock.Lock() - defer m.lock.Unlock() if m.fullChannelBehavior == DropIfChannelFull { for _, w := range m.watchers { select { @@ -252,6 +262,7 @@ func (mw *broadcasterWatcher) ResultChan() <-chan Event { } // Stop stops watching and removes mw from its list. +// It will block until the watcher stop request is actually executed func (mw *broadcasterWatcher) Stop() { mw.stop.Do(func() { close(mw.stopped) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 8271e9b707..99f6770b91 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -97,9 +97,9 @@ func (sw *StreamWatcher) stopping() bool { // receive reads result from the decoder in a loop and sends down the result channel. func (sw *StreamWatcher) receive() { + defer utilruntime.HandleCrash() defer close(sw.result) defer sw.Stop() - defer utilruntime.HandleCrash() for { action, obj, err := sw.source.Decode() if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index 1f4911a311..fd0550e4a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -276,7 +276,7 @@ func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { } } -// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe. +// ProxyWatcher lets you wrap your channel in watch Interface. threadsafe. type ProxyWatcher struct { result chan Event stopCh chan struct{} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 6c8e87e232..57404e0b2b 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -501,7 +501,7 @@ func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { } -// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. +// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} } diff --git a/vendor/k8s.io/client-go/informers/settings/interface.go b/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go similarity index 94% rename from vendor/k8s.io/client-go/informers/settings/interface.go rename to vendor/k8s.io/client-go/informers/apiserverinternal/interface.go index d91e498679..122c030998 100644 --- a/vendor/k8s.io/client-go/informers/settings/interface.go +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/interface.go @@ -16,11 +16,11 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package settings +package apiserverinternal import ( + v1alpha1 "k8s.io/client-go/informers/apiserverinternal/v1alpha1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha1 "k8s.io/client-go/informers/settings/v1alpha1" ) // Interface provides access to each of this group's versions. 
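For reference, a minimal sketch (not part of the vendored diff) of the watch.Broadcaster behaviour after the lock removal above: Watch, Shutdown and watcher removal are now serialized through blockQueue, so Shutdown blocks until the stop request has been processed and a later Watch call panics. The nil event object is for illustration only; real callers pass a runtime.Object.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	b := watch.NewBroadcaster(10, watch.WaitIfChannelFull)

	// Watch blocks until the watcher has actually been added by the event loop.
	w := b.Watch()

	b.Action(watch.Added, nil) // nil object purely for illustration
	fmt.Println(<-w.ResultChan())

	// Shutdown blocks until the broadcaster has stopped distributing events;
	// calling b.Watch() after this point panics, per the new semantics above.
	b.Shutdown()
}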
diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go similarity index 80% rename from vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go rename to vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go index 2502204695..9778325c65 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/interface.go @@ -24,8 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // PodPresets returns a PodPresetInformer. - PodPresets() PodPresetInformer + // StorageVersions returns a StorageVersionInformer. + StorageVersions() StorageVersionInformer } type version struct { @@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// PodPresets returns a PodPresetInformer. -func (v *version) PodPresets() PodPresetInformer { - return &podPresetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// StorageVersions returns a StorageVersionInformer. +func (v *version) StorageVersions() StorageVersionInformer { + return &storageVersionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 0000000000..34175b522d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// StorageVersionInformer provides access to a shared informer and lister for +// StorageVersions. +type StorageVersionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.StorageVersionLister +} + +type storageVersionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageVersionInformer constructs a new informer for StorageVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageVersionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageVersionInformer constructs a new informer for StorageVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.InternalV1alpha1().StorageVersions().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.InternalV1alpha1().StorageVersions().Watch(context.TODO(), options) + }, + }, + &apiserverinternalv1alpha1.StorageVersion{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageVersionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageVersionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageVersionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer) +} + +func (f *storageVersionInformer) Lister() v1alpha1.StorageVersionLister { + return v1alpha1.NewStorageVersionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 5c17f81f93..c6e1026990 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -27,6 +27,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" + apiserverinternal "k8s.io/client-go/informers/apiserverinternal" apps "k8s.io/client-go/informers/apps" autoscaling "k8s.io/client-go/informers/autoscaling" batch "k8s.io/client-go/informers/batch" @@ -43,7 +44,6 @@ import ( policy "k8s.io/client-go/informers/policy" rbac "k8s.io/client-go/informers/rbac" scheduling "k8s.io/client-go/informers/scheduling" - settings "k8s.io/client-go/informers/settings" storage "k8s.io/client-go/informers/storage" kubernetes "k8s.io/client-go/kubernetes" cache "k8s.io/client-go/tools/cache" @@ -190,6 +190,7 @@ type SharedInformerFactory interface { WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool Admissionregistration() admissionregistration.Interface + Internal() apiserverinternal.Interface Apps() apps.Interface Autoscaling() autoscaling.Interface Batch() batch.Interface @@ -205,7 +206,6 @@ type SharedInformerFactory interface { Policy() policy.Interface Rbac() rbac.Interface Scheduling() scheduling.Interface - Settings() settings.Interface Storage() storage.Interface } @@ -213,6 +213,10 @@ func (f *sharedInformerFactory) Admissionregistration() admissionregistration.In return admissionregistration.New(f, f.namespace, f.tweakListOptions) } 
+func (f *sharedInformerFactory) Internal() apiserverinternal.Interface { + return apiserverinternal.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Apps() apps.Interface { return apps.New(f, f.namespace, f.tweakListOptions) } @@ -273,10 +277,6 @@ func (f *sharedInformerFactory) Scheduling() scheduling.Interface { return scheduling.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Settings() settings.Interface { - return settings.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Storage() storage.Interface { return storage.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go index 27e68efe83..b04ca59d3c 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -20,6 +20,7 @@ package flowcontrol import ( v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" + v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -27,6 +28,8 @@ import ( type Interface interface { // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go similarity index 51% rename from vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go index 8c10b16c85..13f4ff0933 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go @@ -16,75 +16,74 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( "context" time "time" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/settings/v1alpha1" + v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" cache "k8s.io/client-go/tools/cache" ) -// PodPresetInformer provides access to a shared informer and lister for -// PodPresets. -type PodPresetInformer interface { +// FlowSchemaInformer provides access to a shared informer and lister for +// FlowSchemas. 
+type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.PodPresetLister + Lister() v1beta1.FlowSchemaLister } -type podPresetInformer struct { +type flowSchemaInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string } -// NewPodPresetInformer constructs a new informer for PodPreset type. +// NewFlowSchemaInformer constructs a new informer for FlowSchema type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodPresetInformer(client, namespace, resyncPeriod, indexers, nil) +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredPodPresetInformer constructs a new informer for PodPreset type. +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).List(context.TODO(), options) + return client.FlowcontrolV1beta1().FlowSchemas().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).Watch(context.TODO(), options) + return client.FlowcontrolV1beta1().FlowSchemas().Watch(context.TODO(), options) }, }, - &settingsv1alpha1.PodPreset{}, + &flowcontrolv1beta1.FlowSchema{}, resyncPeriod, indexers, ) } -func (f *podPresetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodPresetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *podPresetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&settingsv1alpha1.PodPreset{}, f.defaultInformer) +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta1.FlowSchema{}, f.defaultInformer) } -func (f *podPresetInformer) Lister() 
v1alpha1.PodPresetLister { - return v1alpha1.NewPodPresetLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() v1beta1.FlowSchemaLister { + return v1beta1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go new file mode 100644 index 0000000000..50329bb0ac --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // FlowSchemas returns a FlowSchemaInformer. + FlowSchemas() FlowSchemaInformer + // PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. + PriorityLevelConfigurations() PriorityLevelConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// FlowSchemas returns a FlowSchemaInformer. +func (v *version) FlowSchemas() FlowSchemaInformer { + return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer { + return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 0000000000..fa4835906a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + time "time" + + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for +// PriorityLevelConfigurations. +type PriorityLevelConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PriorityLevelConfigurationLister +} + +type priorityLevelConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta1().PriorityLevelConfigurations().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta1().PriorityLevelConfigurations().Watch(context.TODO(), options) + }, + }, + &flowcontrolv1beta1.PriorityLevelConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer) +} + +func (f *priorityLevelConfigurationInformer) Lister() v1beta1.PriorityLevelConfigurationLister { + return v1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index f67c64ac8d..2bc451095e 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -23,6 +23,7 @@ import ( v1 
"k8s.io/api/admissionregistration/v1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" @@ -43,8 +44,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -54,7 +57,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -242,6 +244,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 + case flowcontrolv1beta1.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().FlowSchemas().Informer()}, nil + case flowcontrolv1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().PriorityLevelConfigurations().Informer()}, nil + + // Group=internal.apiserver.k8s.io, Version=v1alpha1 + case apiserverinternalv1alpha1.SchemeGroupVersion.WithResource("storageversions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().StorageVersions().Informer()}, nil + // Group=networking.k8s.io, Version=v1 case networkingv1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().Ingresses().Informer()}, nil @@ -256,6 +268,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1beta1.SchemeGroupVersion.WithResource("ingressclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IngressClasses().Informer()}, nil + // Group=node.k8s.io, Version=v1 + case nodev1.SchemeGroupVersion.WithResource("runtimeclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1().RuntimeClasses().Informer()}, nil + // Group=node.k8s.io, Version=v1alpha1 case nodev1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1alpha1().RuntimeClasses().Informer()}, nil @@ -312,10 +328,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case schedulingv1beta1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1beta1().PriorityClasses().Informer()}, nil - // Group=settings.k8s.io, Version=v1alpha1 - case 
settingsv1alpha1.SchemeGroupVersion.WithResource("podpresets"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil - // Group=storage.k8s.io, Version=v1 case storagev1.SchemeGroupVersion.WithResource("csidrivers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSIDrivers().Informer()}, nil diff --git a/vendor/k8s.io/client-go/informers/node/interface.go b/vendor/k8s.io/client-go/informers/node/interface.go index 9773693797..61ed5af76a 100644 --- a/vendor/k8s.io/client-go/informers/node/interface.go +++ b/vendor/k8s.io/client-go/informers/node/interface.go @@ -20,12 +20,15 @@ package node import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/node/v1" v1alpha1 "k8s.io/client-go/informers/node/v1alpha1" v1beta1 "k8s.io/client-go/informers/node/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. @@ -43,6 +46,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/node/v1/interface.go b/vendor/k8s.io/client-go/informers/node/v1/interface.go new file mode 100644 index 0000000000..913fec4aca --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RuntimeClasses returns a RuntimeClassInformer. + RuntimeClasses() RuntimeClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RuntimeClasses returns a RuntimeClassInformer. 
+func (v *version) RuntimeClasses() RuntimeClassInformer { + return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go new file mode 100644 index 0000000000..293f4e2e2b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + nodev1 "k8s.io/api/node/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/node/v1" + cache "k8s.io/client-go/tools/cache" +) + +// RuntimeClassInformer provides access to a shared informer and lister for +// RuntimeClasses. +type RuntimeClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RuntimeClassLister +} + +type runtimeClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
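The generated comments above state the intended usage pattern: obtain informers through a shared factory rather than constructing them individually, so caches and watch connections are shared. As a purely illustrative sketch (not part of the vendored patch; the kubeconfig path and 30-minute resync are assumptions), a consumer of the newly added node/v1 RuntimeClass informer might look like this:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: kubeconfig at the default location; in-cluster config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One shared factory per process, as the generated doc comments recommend.
	factory := informers.NewSharedInformerFactory(client, 30*time.Minute)

	// Typed access through the node/v1 group introduced by this vendor bump.
	rcInformer := factory.Node().V1().RuntimeClasses()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// The lister reads from the informer's local cache, not the API server.
	classes, err := rcInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, rc := range classes {
		fmt.Println(rc.Name, rc.Handler)
	}
}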
+func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1().RuntimeClasses().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1().RuntimeClasses().Watch(context.TODO(), options) + }, + }, + &nodev1.RuntimeClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&nodev1.RuntimeClass{}, f.defaultInformer) +} + +func (f *runtimeClassInformer) Lister() v1.RuntimeClassLister { + return v1.NewRuntimeClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 064c24d12e..f0d54b6a18 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -24,6 +24,7 @@ import ( discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" @@ -48,8 +49,10 @@ import ( eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" @@ -59,7 +62,6 @@ import ( schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" @@ -71,6 +73,7 @@ type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface + 
InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface @@ -95,8 +98,10 @@ type Interface interface { EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface + FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1() nodev1.NodeV1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface NodeV1beta1() nodev1beta1.NodeV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface @@ -106,7 +111,6 @@ type Interface interface { SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface - SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface @@ -118,6 +122,7 @@ type Clientset struct { *discovery.DiscoveryClient admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client appsV1 *appsv1.AppsV1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client @@ -142,8 +147,10 @@ type Clientset struct { eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client networkingV1 *networkingv1.NetworkingV1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1 *nodev1.NodeV1Client nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client nodeV1beta1 *nodev1beta1.NodeV1beta1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client @@ -153,7 +160,6 @@ type Clientset struct { schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client @@ -169,6 +175,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. 
return c.admissionregistrationV1beta1 } +// InternalV1alpha1 retrieves the InternalV1alpha1Client +func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface { + return c.internalV1alpha1 +} + // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return c.appsV1 @@ -289,6 +300,11 @@ func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha return c.flowcontrolV1alpha1 } +// FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client +func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface { + return c.flowcontrolV1beta1 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 @@ -299,6 +315,11 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return c.networkingV1beta1 } +// NodeV1 retrieves the NodeV1Client +func (c *Clientset) NodeV1() nodev1.NodeV1Interface { + return c.nodeV1 +} + // NodeV1alpha1 retrieves the NodeV1alpha1Client func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return c.nodeV1alpha1 @@ -344,11 +365,6 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return c.schedulingV1 } -// SettingsV1alpha1 retrieves the SettingsV1alpha1Client -func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -393,6 +409,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.internalV1alpha1, err = internalv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -489,6 +509,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -497,6 +521,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.nodeV1, err = nodev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -533,10 +561,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -563,6 +587,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.internalV1alpha1 = internalv1alpha1.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) @@ -587,8 +612,10 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = 
extensionsv1beta1.NewForConfigOrDie(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) + cs.flowcontrolV1beta1 = flowcontrolv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1 = nodev1.NewForConfigOrDie(c) cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) @@ -598,7 +625,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) - cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) @@ -612,6 +638,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.internalV1alpha1 = internalv1alpha1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) @@ -636,8 +663,10 @@ func New(c rest.Interface) *Clientset { cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) + cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1 = nodev1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) @@ -647,7 +676,6 @@ func New(c rest.Interface) *Clientset { cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) - cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) cs.storageV1alpha1 = storagev1alpha1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 70c4ac6e4d..7293844ca5 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -28,6 +28,8 @@ import ( fakeadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" fakeadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake" + internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" + fakeinternalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" fakeappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1/fake" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" @@ -76,10 +78,14 @@ import ( fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" fakeflowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake" + flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + fakeflowcontrolv1beta1 
"k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake" + nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" + fakenodev1 "k8s.io/client-go/kubernetes/typed/node/v1/fake" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" fakenodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake" nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" @@ -98,8 +104,6 @@ import ( fakeschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" fakeschedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake" - settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" - fakesettingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" @@ -166,6 +170,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} } +// InternalV1alpha1 retrieves the InternalV1alpha1Client +func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface { + return &fakeinternalv1alpha1.FakeInternalV1alpha1{Fake: &c.Fake} +} + // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} @@ -286,6 +295,11 @@ func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha return &fakeflowcontrolv1alpha1.FakeFlowcontrolV1alpha1{Fake: &c.Fake} } +// FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client +func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface { + return &fakeflowcontrolv1beta1.FakeFlowcontrolV1beta1{Fake: &c.Fake} +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} @@ -296,6 +310,11 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake} } +// NodeV1 retrieves the NodeV1Client +func (c *Clientset) NodeV1() nodev1.NodeV1Interface { + return &fakenodev1.FakeNodeV1{Fake: &c.Fake} +} + // NodeV1alpha1 retrieves the NodeV1alpha1Client func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return &fakenodev1alpha1.FakeNodeV1alpha1{Fake: &c.Fake} @@ -341,11 +360,6 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return &fakeschedulingv1.FakeSchedulingV1{Fake: &c.Fake} } -// SettingsV1alpha1 retrieves the SettingsV1alpha1Client -func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go 
b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 00e71589bc..0e8ab29f5f 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -21,6 +21,7 @@ package fake import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -45,8 +46,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -56,7 +59,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -73,6 +75,7 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, @@ -97,8 +100,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, + flowcontrolv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1.AddToScheme, nodev1alpha1.AddToScheme, nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, @@ -108,7 +113,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, - settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 2710bf2dec..5601e20dd5 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -21,6 +21,7 @@ package scheme import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -45,8 +46,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -56,7 +59,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 
"k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -73,6 +75,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, @@ -97,8 +100,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, + flowcontrolv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1.AddToScheme, nodev1alpha1.AddToScheme, nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, @@ -108,7 +113,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, - settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go index 8d3a8d8e19..e43a9a3687 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go @@ -19,27 +19,27 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type SettingsV1alpha1Interface interface { +type InternalV1alpha1Interface interface { RESTClient() rest.Interface - PodPresetsGetter + StorageVersionsGetter } -// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. -type SettingsV1alpha1Client struct { +// InternalV1alpha1Client is used to interact with features provided by the internal.apiserver.k8s.io group. +type InternalV1alpha1Client struct { restClient rest.Interface } -func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { - return newPodPresets(c, namespace) +func (c *InternalV1alpha1Client) StorageVersions() StorageVersionInterface { + return newStorageVersions(c) } -// NewForConfig creates a new SettingsV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { +// NewForConfig creates a new InternalV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*InternalV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +48,12 @@ func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { if err != nil { return nil, err } - return &SettingsV1alpha1Client{client}, nil + return &InternalV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// NewForConfigOrDie creates a new InternalV1alpha1Client for the given config and // panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *InternalV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,9 +61,9 @@ func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { return client } -// New creates a new SettingsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *SettingsV1alpha1Client { - return &SettingsV1alpha1Client{c} +// New creates a new InternalV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *InternalV1alpha1Client { + return &InternalV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -81,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { +func (c *InternalV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go similarity index 75% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go index a142edfed0..0960a5e81e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go @@ -19,22 +19,22 @@ limitations under the License. package fake import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + v1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeSettingsV1alpha1 struct { +type FakeInternalV1alpha1 struct { *testing.Fake } -func (c *FakeSettingsV1alpha1) PodPresets(namespace string) v1alpha1.PodPresetInterface { - return &FakePodPresets{c, namespace} +func (c *FakeInternalV1alpha1) StorageVersions() v1alpha1.StorageVersionInterface { + return &FakeStorageVersions{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeSettingsV1alpha1) RESTClient() rest.Interface { +func (c *FakeInternalV1alpha1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go new file mode 100644 index 0000000000..d75049a40b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeStorageVersions implements StorageVersionInterface +type FakeStorageVersions struct { + Fake *FakeInternalV1alpha1 +} + +var storageversionsResource = schema.GroupVersionResource{Group: "internal.apiserver.k8s.io", Version: "v1alpha1", Resource: "storageversions"} + +var storageversionsKind = schema.GroupVersionKind{Group: "internal.apiserver.k8s.io", Version: "v1alpha1", Kind: "StorageVersion"} + +// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. +func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. +func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(storageversionsResource, storageversionsKind, opts), &v1alpha1.StorageVersionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.StorageVersionList{ListMeta: obj.(*v1alpha1.StorageVersionList).ListMeta} + for _, item := range obj.(*v1alpha1.StorageVersionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageVersions. +func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(storageversionsResource, opts)) +} + +// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(storageversionsResource, "status", storageVersion), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} + +// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. +func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storageversionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{}) + return err +} + +// Patch applies the patch and returns the patched storageVersion. +func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, name, pt, data, subresources...), &v1alpha1.StorageVersion{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.StorageVersion), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go similarity index 93% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go index 23d9f94d5a..f2835e6075 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type PodPresetExpansion interface{} +type StorageVersionExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 0000000000..af5466b043 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StorageVersionsGetter has a method to return a StorageVersionInterface. +// A group's client should implement this interface. +type StorageVersionsGetter interface { + StorageVersions() StorageVersionInterface +} + +// StorageVersionInterface has methods to work with StorageVersion resources. 
+type StorageVersionInterface interface { + Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) + Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersion, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) + StorageVersionExpansion +} + +// storageVersions implements StorageVersionInterface +type storageVersions struct { + client rest.Interface +} + +// newStorageVersions returns a StorageVersions +func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { + return &storageVersions{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. +func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Get(). + Resource("storageversions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. +func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageVersionList{} + err = c.client.Get(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageVersions. +func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Post(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. 
+func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Put(). + Resource("storageversions"). + Name(storageVersion.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Put(). + Resource("storageversions"). + Name(storageVersion.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. +func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageversions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageversions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageVersion. +func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Patch(pt). + Resource("storageversions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go new file mode 100644 index 0000000000..771101956f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
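The storageversions client above is cluster-scoped (no namespace argument), matching the internal.apiserver.k8s.io/v1alpha1 API that the apiserver uses to record which version each resource is encoded at. A minimal sketch of reading those objects, assuming an existing clientset:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printStorageVersions lists internal.apiserver.k8s.io/v1alpha1 StorageVersions.
// The list is empty unless the cluster actually serves this alpha API.
func printStorageVersions(ctx context.Context, client kubernetes.Interface) error {
	list, err := client.InternalV1alpha1().StorageVersions().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, sv := range list.Items {
		fmt.Println(sv.Name, len(sv.Status.StorageVersions))
	}
	return nil
}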
+package v1beta1 diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go similarity index 76% rename from vendor/github.com/jmespath/go-jmespath/LICENSE rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go index b03310a91f..16f4439906 100644 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go @@ -1,4 +1,5 @@ -Copyright 2015 James Saryerwinnie +/* +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -11,3 +12,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go new file mode 100644 index 0000000000..1bd58d088a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeFlowcontrolV1beta1 struct { + *testing.Fake +} + +func (c *FakeFlowcontrolV1beta1) FlowSchemas() v1beta1.FlowSchemaInterface { + return &FakeFlowSchemas{c} +} + +func (c *FakeFlowcontrolV1beta1) PriorityLevelConfigurations() v1beta1.PriorityLevelConfigurationInterface { + return &FakePriorityLevelConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeFlowcontrolV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go new file mode 100644 index 0000000000..7732b69dca --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFlowSchemas implements FlowSchemaInterface +type FakeFlowSchemas struct { + Fake *FakeFlowcontrolV1beta1 +} + +var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "flowschemas"} + +var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta1.FlowSchemaList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.FlowSchemaList{ListMeta: obj.(*v1beta1.FlowSchemaList).ListMeta} + for _, item := range obj.(*v1beta1.FlowSchemaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{}) + return err +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta1.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.FlowSchema), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go new file mode 100644 index 0000000000..f93a505d4c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
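These fakes slot into the generated fake Clientset, so unit tests can exercise code that touches the new flowcontrol/v1beta1 group without a real API server. A minimal sketch of the pattern (the test name and seeded object are illustrative):

package example

import (
	"context"
	"testing"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestReadsFlowSchemas(t *testing.T) {
	// Seed the fake clientset with a FlowSchema; no cluster is involved.
	client := fake.NewSimpleClientset(&flowcontrolv1beta1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example-schema"},
	})

	got, err := client.FlowcontrolV1beta1().FlowSchemas().Get(context.TODO(), "example-schema", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "example-schema" {
		t.Fatalf("got %q, want %q", got.Name, "example-schema")
	}
}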
+ +package fake + +import ( + "context" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type FakePriorityLevelConfigurations struct { + Fake *FakeFlowcontrolV1beta1 +} + +var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Resource: "prioritylevelconfigurations"} + +var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta1.PriorityLevelConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta1.PriorityLevelConfigurationList).ListMeta} + for _, item := range obj.(*v1beta1.PriorityLevelConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
+func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta1.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityLevelConfiguration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go new file mode 100644 index 0000000000..9a8ba560e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta1Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta1Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta1Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1beta1Client { + return &FlowcontrolV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FlowcontrolV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go new file mode 100644 index 0000000000..398f4f3473 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. 
+type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. 
+func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..fc15f69355 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 0000000000..88633c8278 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
+type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
+func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go new file mode 100644 index 0000000000..3af5d054f1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go new file mode 100644 index 0000000000..dea10cbada --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/node/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNodeV1 struct { + *testing.Fake +} + +func (c *FakeNodeV1) RuntimeClasses() v1.RuntimeClassInterface { + return &FakeRuntimeClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNodeV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go new file mode 100644 index 0000000000..461386f452 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + nodev1 "k8s.io/api/node/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRuntimeClasses implements RuntimeClassInterface +type FakeRuntimeClasses struct { + Fake *FakeNodeV1 +} + +var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1", Resource: "runtimeclasses"} + +var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1", Kind: "RuntimeClass"} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *nodev1.RuntimeClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &nodev1.RuntimeClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &nodev1.RuntimeClassList{ListMeta: obj.(*nodev1.RuntimeClassList).ListMeta} + for _, item := range obj.(*nodev1.RuntimeClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts v1.CreateOptions) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
+func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts v1.UpdateOptions) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &nodev1.RuntimeClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + + _, err := c.Fake.Invokes(action, &nodev1.RuntimeClassList{}) + return err +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &nodev1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*nodev1.RuntimeClass), err +} diff --git a/third_party/github.com/jmespath/go-jmespath/LICENSE b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go similarity index 79% rename from third_party/github.com/jmespath/go-jmespath/LICENSE rename to vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go index b03310a91f..e2c25926ff 100644 --- a/third_party/github.com/jmespath/go-jmespath/LICENSE +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go @@ -1,4 +1,5 @@ -Copyright 2015 James Saryerwinnie +/* +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -11,3 +12,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go new file mode 100644 index 0000000000..7f0da811b2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/node/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1Client struct { + restClient rest.Interface +} + +func (c *NodeV1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1Client { + return &NodeV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go new file mode 100644 index 0000000000..df8c1cafe8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -0,0 +1,168 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "k8s.io/api/node/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. 
+type RuntimeClassInterface interface { + Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (*v1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (*v1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RuntimeClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RuntimeClassList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(runtimeClass). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). 
+ Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(runtimeClass). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go deleted file mode 100644 index c8ecd09cec..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "k8s.io/api/settings/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakePodPresets implements PodPresetInterface -type FakePodPresets struct { - Fake *FakeSettingsV1alpha1 - ns string -} - -var podpresetsResource = schema.GroupVersionResource{Group: "settings.k8s.io", Version: "v1alpha1", Resource: "podpresets"} - -var podpresetsKind = schema.GroupVersionKind{Group: "settings.k8s.io", Version: "v1alpha1", Kind: "PodPreset"} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *FakePodPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *FakePodPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(podpresetsResource, podpresetsKind, c.ns, opts), &v1alpha1.PodPresetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PodPresetList{ListMeta: obj.(*v1alpha1.PodPresetList).ListMeta} - for _, item := range obj.(*v1alpha1.PodPresetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *FakePodPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(podpresetsResource, c.ns, opts)) - -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *FakePodPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *FakePodPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *FakePodPresets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodPresets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{}) - return err -} - -// Patch applies the patch and returns the patched podPreset. -func (c *FakePodPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go deleted file mode 100644 index aa1cb364ea..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "k8s.io/api/settings/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodPresetsGetter has a method to return a PodPresetInterface. -// A group's client should implement this interface. -type PodPresetsGetter interface { - PodPresets(namespace string) PodPresetInterface -} - -// PodPresetInterface has methods to work with PodPreset resources. -type PodPresetInterface interface { - Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (*v1alpha1.PodPreset, error) - Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (*v1alpha1.PodPreset, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodPreset, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodPresetList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) - PodPresetExpansion -} - -// podPresets implements PodPresetInterface -type podPresets struct { - client rest.Interface - ns string -} - -// newPodPresets returns a PodPresets -func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { - return &podPresets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. 
-func (c *podPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodPresetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podPreset). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podpresets"). - Name(podPreset.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podPreset). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *podPresets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podPresets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podPreset. -func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go similarity index 69% rename from vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go index fba210343a..ad860c7c95 100644 --- a/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/expansion_generated.go @@ -18,10 +18,6 @@ limitations under the License. package v1alpha1 -// PodPresetListerExpansion allows custom methods to be added to -// PodPresetLister. -type PodPresetListerExpansion interface{} - -// PodPresetNamespaceListerExpansion allows custom methods to be added to -// PodPresetNamespaceLister. -type PodPresetNamespaceListerExpansion interface{} +// StorageVersionListerExpansion allows custom methods to be added to +// StorageVersionLister. +type StorageVersionListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 0000000000..9a6d74b2bf --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StorageVersionLister helps list StorageVersions. +// All objects returned here must be treated as read-only. +type StorageVersionLister interface { + // List lists all StorageVersions in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) + // Get retrieves the StorageVersion from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.StorageVersion, error) + StorageVersionListerExpansion +} + +// storageVersionLister implements the StorageVersionLister interface. +type storageVersionLister struct { + indexer cache.Indexer +} + +// NewStorageVersionLister returns a new StorageVersionLister. +func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister { + return &storageVersionLister{indexer: indexer} +} + +// List lists all StorageVersions in the indexer. +func (s *storageVersionLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.StorageVersion)) + }) + return ret, err +} + +// Get retrieves the StorageVersion from the index for a given name. 
+func (s *storageVersionLister) Get(name string) (*v1alpha1.StorageVersion, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("storageversion"), name) + } + return obj.(*v1alpha1.StorageVersion), nil +} diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_defaults.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go similarity index 56% rename from vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_defaults.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go index f2d6d0f2e7..c674e951e0 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_defaults.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Tekton Authors +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by lister-gen. DO NOT EDIT. -import ( - "context" -) +package v1beta1 -// SetDefaults sets the defaults on the object. -func (it *ClusterInterceptor) SetDefaults(ctx context.Context) { - if IsUpgradeViaDefaulting(ctx) { - if svc := it.Spec.ClientConfig.Service; svc != nil { - if svc.Port == nil { - svc.Port = &defaultPort - } - } - } -} +// FlowSchemaListerExpansion allows custom methods to be added to +// FlowSchemaLister. +type FlowSchemaListerExpansion interface{} + +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to +// PriorityLevelConfigurationLister. +type PriorityLevelConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go new file mode 100644 index 0000000000..7927a8411e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FlowSchemaLister helps list FlowSchemas. +// All objects returned here must be treated as read-only. +type FlowSchemaLister interface { + // List lists all FlowSchemas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) + // Get retrieves the FlowSchema from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1beta1.FlowSchema, error) + FlowSchemaListerExpansion +} + +// flowSchemaLister implements the FlowSchemaLister interface. +type flowSchemaLister struct { + indexer cache.Indexer +} + +// NewFlowSchemaLister returns a new FlowSchemaLister. +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { + return &flowSchemaLister{indexer: indexer} +} + +// List lists all FlowSchemas in the indexer. +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.FlowSchema)) + }) + return ret, err +} + +// Get retrieves the FlowSchema from the index for a given name. +func (s *flowSchemaLister) Get(name string) (*v1beta1.FlowSchema, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("flowschema"), name) + } + return obj.(*v1beta1.FlowSchema), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 0000000000..c94aaa4c1d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. +// All objects returned here must be treated as read-only. +type PriorityLevelConfigurationLister interface { + // List lists all PriorityLevelConfigurations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) + // Get retrieves the PriorityLevelConfiguration from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.PriorityLevelConfiguration, error) + PriorityLevelConfigurationListerExpansion +} + +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. +type priorityLevelConfigurationLister struct { + indexer cache.Indexer +} + +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { + return &priorityLevelConfigurationLister{indexer: indexer} +} + +// List lists all PriorityLevelConfigurations in the indexer. 
+func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PriorityLevelConfiguration)) + }) + return ret, err +} + +// Get retrieves the PriorityLevelConfiguration from the index for a given name. +func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta1.PriorityLevelConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("prioritylevelconfiguration"), name) + } + return obj.(*v1beta1.PriorityLevelConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go new file mode 100644 index 0000000000..4f010b87c3 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// RuntimeClassListerExpansion allows custom methods to be added to +// RuntimeClassLister. +type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go new file mode 100644 index 0000000000..6e00cf1a59 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/node/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RuntimeClassLister helps list RuntimeClasses. +// All objects returned here must be treated as read-only. +type RuntimeClassLister interface { + // List lists all RuntimeClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) + // Get retrieves the RuntimeClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.RuntimeClass, error) + RuntimeClassListerExpansion +} + +// runtimeClassLister implements the RuntimeClassLister interface. 
+type runtimeClassLister struct { + indexer cache.Indexer +} + +// NewRuntimeClassLister returns a new RuntimeClassLister. +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { + return &runtimeClassLister{indexer: indexer} +} + +// List lists all RuntimeClasses in the indexer. +func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.RuntimeClass)) + }) + return ret, err +} + +// Get retrieves the RuntimeClass from the index for a given name. +func (s *runtimeClassLister) Get(name string) (*v1.RuntimeClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("runtimeclass"), name) + } + return obj.(*v1.RuntimeClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go deleted file mode 100644 index c21eb72e60..0000000000 --- a/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PodPresetLister helps list PodPresets. -// All objects returned here must be treated as read-only. -type PodPresetLister interface { - // List lists all PodPresets in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) - // PodPresets returns an object that can list and get PodPresets. - PodPresets(namespace string) PodPresetNamespaceLister - PodPresetListerExpansion -} - -// podPresetLister implements the PodPresetLister interface. -type podPresetLister struct { - indexer cache.Indexer -} - -// NewPodPresetLister returns a new PodPresetLister. -func NewPodPresetLister(indexer cache.Indexer) PodPresetLister { - return &podPresetLister{indexer: indexer} -} - -// List lists all PodPresets in the indexer. -func (s *podPresetLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PodPreset)) - }) - return ret, err -} - -// PodPresets returns an object that can list and get PodPresets. -func (s *podPresetLister) PodPresets(namespace string) PodPresetNamespaceLister { - return podPresetNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PodPresetNamespaceLister helps list and get PodPresets. -// All objects returned here must be treated as read-only. -type PodPresetNamespaceLister interface { - // List lists all PodPresets in the indexer for a given namespace. 
- // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) - // Get retrieves the PodPreset from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PodPreset, error) - PodPresetNamespaceListerExpansion -} - -// podPresetNamespaceLister implements the PodPresetNamespaceLister -// interface. -type podPresetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodPresets in the indexer for a given namespace. -func (s podPresetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PodPreset)) - }) - return ret, err -} - -// Get retrieves the PodPreset from the indexer for a given namespace and name. -func (s podPresetNamespaceLister) Get(name string) (*v1alpha1.PodPreset, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("podpreset"), name) - } - return obj.(*v1alpha1.PodPreset), nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go index 6fb53cecf9..c108997920 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -18,11 +18,12 @@ package clientauthentication import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta @@ -37,7 +38,7 @@ type ExecCredential struct { Status *ExecCredentialStatus } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Response is populated when the transport encounters HTTP status codes, such as 401, @@ -49,6 +50,13 @@ type ExecCredentialSpec struct { // interactive prompt. // +optional Interactive bool + + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). + // +optional + Cluster *Cluster } // ExecCredentialStatus holds credentials for the transport to use. @@ -58,13 +66,13 @@ type ExecCredentialStatus struct { ExpirationTimestamp *metav1.Time // Token is a bearer token used by the client for request authentication. // +optional - Token string + Token string `datapolicy:"token"` // PEM-encoded client TLS certificate. // +optional ClientCertificateData string // PEM-encoded client TLS private key. // +optional - ClientKeyData string + ClientKeyData string `datapolicy:"secret-key"` } // Response defines metadata about a failed request, including HTTP status code and @@ -75,3 +83,56 @@ type Response struct { // Code is the HTTP status code returned by the server. 
Code int32 } + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. + // +listType=atomic + // +optional + CertificateAuthorityData []byte + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. + // +optional + Config runtime.Object +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go new file mode 100644 index 0000000000..572e049f81 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // This conversion intentionally omits the Cluster field which is only supported in newer versions. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index c714e2457a..1ff13c4382 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -37,7 +37,7 @@ type ExecCredential struct { Status *ExecCredentialStatus `json:"status,omitempty"` } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Response is populated when the transport encounters HTTP status codes, such as 401, @@ -61,11 +61,11 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. 
- ClientKeyData string `json:"clientKeyData,omitempty"` + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` } // Response defines metadata about a failed request, including HTTP status code and diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go index 461c20b298..b0e503af49 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -51,11 +51,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { @@ -76,6 +71,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } return nil } @@ -119,14 +119,10 @@ func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialS func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { out.Response = (*Response)(unsafe.Pointer(in.Response)) out.Interactive = in.Interactive + // WARNING: in.Cluster requires manual conversion: does not exist in peer-type return nil } -// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function. 
-func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) -} - func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go index f543806ac9..441b7c44bd 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go @@ -17,10 +17,12 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" - clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" ) func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - return nil + // This conversion intentionally omits the Response and Interactive fields, which were only + // supported in v1alpha1. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go index d6e267452e..fabc6f65e6 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -18,17 +18,17 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` - // Spec holds information passed to the plugin by the transport. This contains - // request and runtime specific information, such as if the session is interactive. + // Spec holds information passed to the plugin by the transport. Spec ExecCredentialSpec `json:"spec,omitempty"` // Status is filled in by the plugin and holds the credentials that the transport @@ -37,9 +37,16 @@ type ExecCredential struct { Status *ExecCredentialStatus `json:"status,omitempty"` } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. -type ExecCredentialSpec struct{} +type ExecCredentialSpec struct { + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). 
+ // +optional + Cluster *Cluster `json:"cluster,omitempty"` +} // ExecCredentialStatus holds credentials for the transport to use. // @@ -51,9 +58,62 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. - ClientKeyData string `json:"clientKeyData,omitempty"` + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` +} + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string `json:"tls-server-name,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. + // +listType=atomic + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string `json:"proxy-url,omitempty"` + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. 
+ // +optional + Config runtime.RawExtension `json:"config,omitempty"` } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 0e533e4657..90f7935fef 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -36,6 +36,16 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_Cluster_To_v1beta1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) }); err != nil { @@ -69,6 +79,40 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function. +func Convert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + return autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in, out, s) +} + +func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_clientauthentication_Cluster_To_v1beta1_Cluster is an autogenerated conversion function. 
+func Convert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in, out, s) +} + func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -96,6 +140,15 @@ func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *c } func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(clientauthentication.Cluster) + if err := Convert_v1beta1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } return nil } @@ -107,6 +160,15 @@ func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSp func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { // WARNING: in.Response requires manual conversion: does not exist in peer-type // WARNING: in.Interactive requires manual conversion: does not exist in peer-type + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + if err := Convert_clientauthentication_Cluster_To_v1beta1_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } return nil } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go index 736b8cf00d..3a72ece0c6 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -24,11 +24,33 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in out.TypeMeta = in.TypeMeta - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status *out = new(ExecCredentialStatus) @@ -58,6 +80,11 @@ func (in *ExecCredential) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go index c568a6fc8a..045b07f5b0 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -24,6 +24,30 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in @@ -63,6 +87,11 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = new(Response) (*in).DeepCopyInto(*out) } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 627bb2de94..af21c49953 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -87,8 +87,15 @@ func newCache() *cache { var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} -func cacheKey(c *api.ExecConfig) string { - return spewConfig.Sprint(c) +func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { + key := struct { + conf *api.ExecConfig + cluster *clientauthentication.Cluster + }{ + conf: conf, + cluster: cluster, + } + return spewConfig.Sprint(key) } type cache struct { @@ -155,12 +162,12 @@ func (s *sometimes) Do(f func()) { } // GetAuthenticator returns an exec-based plugin for providing client credentials. 
-func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) { - return newAuthenticator(globalCache, config) +func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { + return newAuthenticator(globalCache, config, cluster) } -func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) { - key := cacheKey(config) +func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { + key := cacheKey(config, cluster) if a, ok := c.get(key); ok { return a, nil } @@ -171,9 +178,11 @@ func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) } a := &Authenticator{ - cmd: config.Command, - args: config.Args, - group: gv, + cmd: config.Command, + args: config.Args, + group: gv, + cluster: cluster, + provideClusterInfo: config.ProvideClusterInfo, installHint: config.InstallHint, sometimes: &sometimes{ @@ -200,10 +209,12 @@ func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) // The plugin input and output are defined by the API group client.authentication.k8s.io. type Authenticator struct { // Set by the config - cmd string - args []string - group schema.GroupVersion - env []string + cmd string + args []string + group schema.GroupVersion + env []string + cluster *clientauthentication.Cluster + provideClusterInfo bool // Used to avoid log spew by rate limiting install hint printing. We didn't do // this by interval based rate limiting alone since that way may have prevented @@ -230,8 +241,8 @@ type Authenticator struct { } type credentials struct { - token string - cert *tls.Certificate + token string `datapolicy:"token"` + cert *tls.Certificate `datapolicy:"secret-key"` } // UpdateTransportConfig updates the transport.Config to use credentials @@ -367,19 +378,16 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err Interactive: a.interactive, }, } + if a.provideClusterInfo { + cred.Spec.Cluster = a.cluster + } env := append(a.environ(), a.env...) - if a.group == v1alpha1.SchemeGroupVersion { - // Input spec disabled for beta due to lack of use. Possibly re-enable this later if - // someone wants it back. - // - // See: https://github.com/kubernetes/kubernetes/issues/61796 - data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) - if err != nil { - return fmt.Errorf("encode ExecCredentials: %v", err) - } - env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) + data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) + if err != nil { + return fmt.Errorf("encode ExecCredentials: %v", err) } + env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) stdout := &bytes.Buffer{} cmd := exec.Command(a.cmd, a.args...) 
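The exec.go hunk above changes how client-go drives credential plugins: the encoded ExecCredential is now written into the plugin's environment for every supported API group (previously only v1alpha1), and Spec.Cluster is populated whenever provideClusterInfo is enabled in the kubeconfig exec provider config (ExecConfig.ProvideClusterInfo). The sketch below is illustrative only and is not part of the vendored change; it assumes the conventional upstream environment variable name KUBERNETES_EXEC_INFO (the value behind execInfoEnv, which this diff does not show) and returns a hypothetical static token. A plugin along these lines would be referenced from a kubeconfig users[].user.exec stanza with apiVersion client.authentication.k8s.io/v1beta1 and provideClusterInfo: true.

// Illustrative sketch, not part of the vendored change: a minimal exec
// credential plugin that consumes the ExecCredential passed via the exec
// info environment variable and answers with a bearer token. The structs
// mirror the v1beta1 wire format added in this diff; the env var name and
// the token value are assumptions for the example.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type execCluster struct {
	Server string          `json:"server"`
	Config json.RawMessage `json:"config,omitempty"` // per-cluster data from the kubeconfig exec extension
}

type execCredential struct {
	APIVersion string `json:"apiVersion"`
	Kind       string `json:"kind"`
	Spec       struct {
		Cluster *execCluster `json:"cluster,omitempty"`
	} `json:"spec"`
}

func main() {
	// Decode the input client-go now provides to the plugin process.
	var in execCredential
	if raw := os.Getenv("KUBERNETES_EXEC_INFO"); raw != "" {
		if err := json.Unmarshal([]byte(raw), &in); err != nil {
			fmt.Fprintf(os.Stderr, "invalid KUBERNETES_EXEC_INFO: %v\n", err)
			os.Exit(1)
		}
	}

	// Spec.Cluster is non-nil only when provideClusterInfo is set in the
	// kubeconfig exec provider config, per the plumbing in exec.go above.
	if in.Spec.Cluster != nil {
		fmt.Fprintf(os.Stderr, "issuing credentials for %s\n", in.Spec.Cluster.Server)
	}

	// Respond on stdout with an ExecCredential whose status carries the token.
	out := map[string]interface{}{
		"apiVersion": "client.authentication.k8s.io/v1beta1",
		"kind":       "ExecCredential",
		"status": map[string]string{
			"token": "dummy-token-for-illustration",
		},
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		fmt.Fprintf(os.Stderr, "encode status: %v\n", err)
		os.Exit(1)
	}
}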
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go index 389dc6c79f..ee1bfdef3f 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go @@ -188,7 +188,7 @@ func (g *gcpAuthProvider) Login() error { return nil } type cachedTokenSource struct { lk sync.Mutex source oauth2.TokenSource - accessToken string + accessToken string `datapolicy:"token"` expiry time.Time persister restclient.AuthProviderConfigPersister cache map[string]string @@ -269,8 +269,8 @@ func (t *cachedTokenSource) baseCache() map[string]string { type commandTokenSource struct { cmd string args []string - tokenKey string - expiryKey string + tokenKey string `datapolicy:"token"` + expiryKey string `datapolicy:"secret-key"` timeFmt string } diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 6e50eef51e..3735750bbc 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -65,12 +65,12 @@ type Config struct { // Server requires Basic authentication Username string - Password string + Password string `datapolicy:"password"` // Server requires Bearer authentication. This client will not attempt to use // refresh tokens for an OAuth2 flow. // TODO: demonstrate an OAuth2 compatible client. - BearerToken string + BearerToken string `datapolicy:"token"` // Path to a file containing a BearerToken. // If set, the contents are periodically read. @@ -125,6 +125,7 @@ type Config struct { // WarningHandler handles warnings in server responses. // If not set, the default warning handler is used. + // See documentation for SetDefaultWarningHandler() for details. WarningHandler WarningHandler // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. @@ -133,7 +134,7 @@ type Config struct { // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) - // Proxy is the the proxy func to be used for all requests made by this + // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy // returns a nil *URL, no proxy is used. // @@ -159,6 +160,15 @@ func (sanitizedAuthConfigPersister) String() string { return "rest.AuthProviderConfigPersister(--- REDACTED ---)" } +type sanitizedObject struct{ runtime.Object } + +func (sanitizedObject) GoString() string { + return "runtime.Object(--- REDACTED ---)" +} +func (sanitizedObject) String() string { + return "runtime.Object(--- REDACTED ---)" +} + // GoString implements fmt.GoStringer and sanitizes sensitive fields of Config // to prevent accidental leaking via logs. func (c *Config) GoString() string { @@ -182,7 +192,9 @@ func (c *Config) String() string { if cc.AuthConfigPersister != nil { cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} } - + if cc.ExecProvider != nil && cc.ExecProvider.Config != nil { + cc.ExecProvider.Config = sanitizedObject{Object: cc.ExecProvider.Config} + } return fmt.Sprintf("%#v", cc) } @@ -203,7 +215,7 @@ type TLSClientConfig struct { // Server should be accessed without verifying the TLS certificate. For testing only. Insecure bool // ServerName is passed to the server for SNI and is used in the client to check server - // ceritificates against. 
If ServerName is empty, the hostname used to contact the + // certificates against. If ServerName is empty, the hostname used to contact the // server is used. ServerName string @@ -219,7 +231,7 @@ type TLSClientConfig struct { CertData []byte // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). // KeyData takes precedence over KeyFile - KeyData []byte + KeyData []byte `datapolicy:"security-key"` // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte @@ -587,7 +599,7 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { - return &Config{ + c := &Config{ Host: config.Host, APIPath: config.APIPath, ContentConfig: config.ContentConfig, @@ -626,4 +638,8 @@ func CopyConfig(config *Config) *Config { Dial: config.Dial, Proxy: config.Proxy, } + if config.ExecProvider != nil && config.ExecProvider.Config != nil { + c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject() + } + return c } diff --git a/vendor/k8s.io/client-go/rest/exec.go b/vendor/k8s.io/client-go/rest/exec.go new file mode 100644 index 0000000000..5f3b43c55a --- /dev/null +++ b/vendor/k8s.io/client-go/rest/exec.go @@ -0,0 +1,85 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/http" + "net/url" + + "k8s.io/client-go/pkg/apis/clientauthentication" + clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication" +) + +// This file contains Config logic related to exec credential plugins. + +// ConfigToExecCluster creates a clientauthenticationapi.Cluster with the corresponding fields from +// the provided Config. +func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, error) { + caData, err := dataFromSliceOrFile(config.CAData, config.CAFile) + if err != nil { + return nil, fmt.Errorf("failed to load CA bundle for execProvider: %v", err) + } + + var proxyURL string + if config.Proxy != nil { + req, err := http.NewRequest("", config.Host, nil) + if err != nil { + return nil, fmt.Errorf("failed to create proxy URL request for execProvider: %w", err) + } + url, err := config.Proxy(req) + if err != nil { + return nil, fmt.Errorf("failed to get proxy URL for execProvider: %w", err) + } + if url != nil { + proxyURL = url.String() + } + } + + return &clientauthentication.Cluster{ + Server: config.Host, + TLSServerName: config.ServerName, + InsecureSkipTLSVerify: config.Insecure, + CertificateAuthorityData: caData, + ProxyURL: proxyURL, + Config: config.ExecProvider.Config, + }, nil +} + +// ExecClusterToConfig creates a Config with the corresponding fields from the provided +// clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have +// any authentication-related fields set). 
+func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) { + var proxy func(*http.Request) (*url.URL, error) + if cluster.ProxyURL != "" { + proxyURL, err := url.Parse(cluster.ProxyURL) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy URL: %w", err) + } + proxy = http.ProxyURL(proxyURL) + } + + return &Config{ + Host: cluster.Server, + TLSClientConfig: TLSClientConfig{ + Insecure: cluster.InsecureSkipTLSVerify, + ServerName: cluster.TLSServerName, + CAData: cluster.CertificateAuthorityData, + }, + Proxy: proxy, + }, nil +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0ed7def73e..1ccc0dafe6 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -511,13 +511,23 @@ func (r Request) finalURLTemplate() url.URL { } r.params = newParams url := r.URL() - segments := strings.Split(r.URL().Path, "/") + + segments := strings.Split(url.Path, "/") groupIndex := 0 index := 0 - if r.URL() != nil && r.c.base != nil && strings.Contains(r.URL().Path, r.c.base.Path) { - groupIndex += len(strings.Split(r.c.base.Path, "/")) + trimmedBasePath := "" + if url != nil && r.c.base != nil && strings.Contains(url.Path, r.c.base.Path) { + p := strings.TrimPrefix(url.Path, r.c.base.Path) + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + // store the base path that we have trimmed so we can append it + // before returning the URL + trimmedBasePath = r.c.base.Path + segments = strings.Split(p, "/") + groupIndex = 1 } - if groupIndex >= len(segments) { + if len(segments) <= 2 { return *url } @@ -563,7 +573,7 @@ func (r Request) finalURLTemplate() url.URL { segments[index+3] = "{name}" } } - url.Path = path.Join(segments...) + url.Path = path.Join(trimmedBasePath, path.Join(segments...)) return *url } @@ -638,7 +648,7 @@ func (b *throttledLogger) attemptToLog() (klog.Level, bool) { return -1, false } -// Infof will write a log message at each logLevel specified by the reciever's throttleSettings +// Infof will write a log message at each logLevel specified by the receiver's throttleSettings // as long as it hasn't written a log message more recently than minLogInterval. 
func (b *throttledLogger) Infof(message string, args ...interface{}) { if logLevel, ok := b.attemptToLog(); ok { diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 450edc6edd..87792750ad 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -21,6 +21,7 @@ import ( "errors" "net/http" + "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/plugin/pkg/client/auth/exec" "k8s.io/client-go/transport" ) @@ -94,7 +95,15 @@ func (c *Config) TransportConfig() (*transport.Config, error) { } if c.ExecProvider != nil { - provider, err := exec.GetAuthenticator(c.ExecProvider) + var cluster *clientauthentication.Cluster + if c.ExecProvider.ProvideClusterInfo { + var err error + cluster, err = ConfigToExecCluster(c) + if err != nil { + return nil, err + } + } + provider, err := exec.GetAuthenticator(c.ExecProvider, cluster) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/rest/warnings.go b/vendor/k8s.io/client-go/rest/warnings.go index 45c1c3b2cd..18476f5ff9 100644 --- a/vendor/k8s.io/client-go/rest/warnings.go +++ b/vendor/k8s.io/client-go/rest/warnings.go @@ -38,8 +38,11 @@ var ( defaultWarningHandlerLock sync.RWMutex ) -// SetDefaultWarningHandler sets the default handler client uses when warning headers are encountered. -// By default, warnings are printed to stderr. +// SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered. +// By default, warnings are logged. Several built-in implementations are provided: +// - NoWarnings suppresses warnings. +// - WarningLogger logs warnings. +// - NewWarningWriter() outputs warnings to the provided writer. func SetDefaultWarningHandler(l WarningHandler) { defaultWarningHandlerLock.Lock() defer defaultWarningHandlerLock.Unlock() diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go index 2537a2b4e2..484e4c8393 100644 --- a/vendor/k8s.io/client-go/restmapper/category_expansion.go +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -58,7 +58,7 @@ func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryE // Expand fulfills CategoryExpander func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. - apiResourceLists, _ := e.discoveryClient.ServerResources() + _, apiResourceLists, _ := e.discoveryClient.ServerGroupsAndResources() if len(apiResourceLists) == 0 { return nil, false } diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index 6903ec8088..73b317c1ca 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -84,7 +84,7 @@ func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []re res := []resourceShortcuts{} // get server resources // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
- apiResList, err := e.discoveryClient.ServerResources() + _, apiResList, err := e.discoveryClient.ServerGroupsAndResources() if err != nil { klog.V(1).Infof("Error loading discovery information: %v", err) } diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index c341726774..4c24f79977 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -75,11 +75,11 @@ import ( // to be read/written from a file as a JSON object. type Info struct { User string - Password string + Password string `datapolicy:"password"` CAFile string CertFile string KeyFile string - BearerToken string + BearerToken string `datapolicy:"token"` Insecure *bool } diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 7bbe635426..9d0a18771e 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -38,6 +38,5 @@ reviewers: - resouer - jessfraz - mfojtik -- mqliang - sdminonne - ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 3ad9b53bbe..684d1a8d38 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -72,6 +72,9 @@ type Config struct { // Called whenever the ListAndWatch drops the connection with an error. WatchErrorHandler WatchErrorHandler + + // WatchListPageSize is the requested chunk size of initial and relist watch lists. + WatchListPageSize int64 } // ShouldResyncFunc is a type of function that indicates if a reflector should perform a @@ -134,6 +137,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.config.FullResyncPeriod, ) r.ShouldResync = c.config.ShouldResync + r.WatchListPageSize = c.config.WatchListPageSize r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 2774f4f211..148b478d58 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -145,7 +145,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { // DeltaFIFO's Pop(), Get(), and GetByKey() methods return // interface{} to satisfy the Store/Queue interfaces, but they // will always return an object of type Deltas. List() returns -// the newest objects currently in the FIFO. +// the newest object from each accumulator in the FIFO. // // A DeltaFIFO's knownObjects KeyListerGetter provides the abilities // to list Store keys and to get objects by Store key. The objects in @@ -161,12 +161,13 @@ type DeltaFIFO struct { lock sync.RWMutex cond sync.Cond - // `items` maps keys to Deltas. - // `queue` maintains FIFO order of keys for consumption in Pop(). - // We maintain the property that keys in the `items` and `queue` are - // strictly 1:1 mapping, and that all Deltas in `items` should have - // at least one Delta. + // `items` maps a key to a Deltas. + // Each such Deltas has at least one Delta. items map[string]Deltas + + // `queue` maintains FIFO order of keys for consumption in Pop(). + // There are no duplicates in `queue`. + // A key is in `queue` if and only if it is in `items`. 
queue []string // populated is true if the first batch of items inserted by Replace() has been populated @@ -376,8 +377,8 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err if err != nil { return KeyError{obj, err} } - - newDeltas := append(f.items[id], Delta{actionType, obj}) + oldDeltas := f.items[id] + newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) if len(newDeltas) > 0 { @@ -389,10 +390,14 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err } else { // This never happens, because dedupDeltas never returns an empty list // when given a non-empty list (as it is here). - // But if somehow it ever does return an empty list, then - // We need to remove this from our map (extra items in the queue are - // ignored if they are not in the map). - delete(f.items, id) + // If somehow it happens anyway, deal with it but complain. + if oldDeltas == nil { + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) + return nil + } + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) + f.items[id] = newDeltas + return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) } return nil } @@ -459,7 +464,7 @@ func (f *DeltaFIFO) IsClosed() bool { return f.closed } -// Pop blocks until an item is added to the queue, and then returns it. If +// Pop blocks until the queue has some items, and then returns one. If // multiple items are ready, they are returned in the order in which they were // added/updated. The item is removed from the queue (and the store) before it // is returned, so if you don't successfully process it, you need to add it back @@ -494,7 +499,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } item, ok := f.items[id] if !ok { - // Item may have been deleted subsequently. + // This should never happen + klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id) continue } delete(f.items, id) diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index e995abe253..360d7304b7 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -69,6 +69,8 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager + // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch. + initConnBackoffManager wait.BackoffManager resyncPeriod time.Duration // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked @@ -99,6 +101,15 @@ type Reflector struct { watchErrorHandler WatchErrorHandler } +// ResourceVersionUpdater is an interface that allows store implementation to +// track the current resource version of the reflector. This is especially +// important if storage bookmarks are enabled. +type ResourceVersionUpdater interface { + // UpdateResourceVersion is called each time current resource version of the reflector + // is updated. + UpdateResourceVersion(resourceVersion string) +} + // The WatchErrorHandler is called whenever ListAndWatch drops the // connection with an error. After calling this handler, the informer // will backoff and retry. 
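// Editorial aside, not part of the vendored diff: a sketch of a Store wrapper
// opting in to the ResourceVersionUpdater hook introduced in the reflector.go
// hunk above. The reflector calls UpdateResourceVersion after each watch
// event, which matters when watch bookmarks are enabled. The wrapper type and
// field names are invented for illustration.
package example

import (
	"sync"

	"k8s.io/client-go/tools/cache"
)

type versionTrackingStore struct {
	cache.Store // embed a real store, e.g. cache.NewStore(cache.MetaNamespaceKeyFunc)

	mu              sync.Mutex
	resourceVersion string
}

// UpdateResourceVersion implements cache.ResourceVersionUpdater.
func (s *versionTrackingStore) UpdateResourceVersion(resourceVersion string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.resourceVersion = resourceVersion
}

// Compile-time check that the wrapper satisfies the new interface.
var _ cache.ResourceVersionUpdater = (*versionTrackingStore)(nil)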
@@ -166,10 +177,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, - watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + resyncPeriod: resyncPeriod, + clock: realClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), } r.setExpectedType(expectedType) return r @@ -404,9 +416,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // If this is "connection refused" error, it means that most likely apiserver is not responsive. // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. - // If that's the case wait and resend watch request. + // If that's the case begin exponentially backing off and resend watch request. if utilnet.IsConnectionRefused(err) { - time.Sleep(time.Second) + <-r.initConnBackoffManager.Backoff().C() continue } return err @@ -504,6 +516,9 @@ loop: } *resourceVersion = newResourceVersion r.setLastSyncResourceVersion(newResourceVersion) + if rvu, ok := r.store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(newResourceVersion) + } eventCount++ } } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index f489897630..3a3f538a15 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -485,13 +485,13 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. 
Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 829424dcf3..24f469236e 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -114,10 +114,10 @@ type AuthInfo struct { ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` @@ -135,7 +135,7 @@ type AuthInfo struct { Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` @@ -215,6 +215,36 @@ type ExecConfig struct { // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. InstallHint string `json:"installHint,omitempty"` + + // ProvideClusterInfo determines whether or not to provide cluster information, + // which could potentially contain very large CA data, to this exec plugin as a + // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set + // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for + // reading this environment variable. + ProvideClusterInfo bool `json:"provideClusterInfo"` + + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's extensions[exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. 
+ // +k8s:conversion-gen=false + Config runtime.Object } var _ fmt.Stringer = new(ExecConfig) @@ -237,7 +267,11 @@ func (c ExecConfig) String() string { if len(c.Env) > 0 { env = "[]ExecEnvVar{--- REDACTED ---}" } - return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) + config := "runtime.Object(nil)" + if c.Config != nil { + config = "runtime.Object(--- REDACTED ---)" + } + return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config) } // ExecEnvVar is used for setting environment variables when executing an exec-based diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 0395f860f3..8c29b39c1b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -104,10 +104,10 @@ type AuthInfo struct { ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` @@ -125,7 +125,7 @@ type AuthInfo struct { Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` @@ -214,6 +214,13 @@ type ExecConfig struct { // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. InstallHint string `json:"installHint,omitempty"` + + // ProvideClusterInfo determines whether or not to provide cluster information, + // which could potentially contain very large CA data, to this exec plugin as a + // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set + // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for + // reading this environment variable. 
+ ProvideClusterInfo bool `json:"provideClusterInfo"` } // ExecEnvVar is used for setting environment variables when executing an exec-based diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index bf9eaeca32..26e96529d3 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -171,7 +171,15 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s out.Username = in.Username out.Password = in.Password out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - out.Exec = (*api.ExecConfig)(unsafe.Pointer(in.Exec)) + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(api.ExecConfig) + if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -197,7 +205,15 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s out.Username = in.Username out.Password = in.Password out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - out.Exec = (*ExecConfig)(unsafe.Pointer(in.Exec)) + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecConfig) + if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -359,6 +375,7 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint + out.ProvideClusterInfo = in.ProvideClusterInfo return nil } @@ -373,6 +390,8 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint + out.ProvideClusterInfo = in.ProvideClusterInfo + // INFO: in.Config opted out of conversion generation return nil } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index 3240a7a98d..a04de6260b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -267,6 +267,9 @@ func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { *out = make([]ExecEnvVar, len(*in)) copy(*out, *in) } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } return } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 690afce0cd..9e1cd64a09 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -34,6 +34,11 @@ import ( "github.com/imdario/mergo" ) +const ( + // clusterExtensionKey is reserved in the cluster extensions list for exec plugin config. 
+ clusterExtensionKey = "client.authentication.k8s.io/exec" +) + var ( // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields // DEPRECATED will be replaced @@ -72,7 +77,7 @@ type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderC type promptedCredentials struct { username string - password string + password string `datapolicy:"password"` } // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information @@ -189,7 +194,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { authInfoName, _ := config.getAuthInfoName() persister = PersisterForUser(config.configAccess, authInfoName) } - userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) + userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo) if err != nil { return nil, err } @@ -232,7 +237,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file // 4. if there is not enough information to identify the user, prompt if possible -func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { +func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { mergedConfig := &restclient.Config{} // blindly overwrite existing values based on precedence @@ -271,6 +276,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if configAuthInfo.Exec != nil { mergedConfig.ExecProvider = configAuthInfo.Exec mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint) + mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey] } // if there still isn't enough information to authenticate the user, try prompting diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 5f1660bf93..a7eae66bfa 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -83,10 +83,13 @@ func (o *PathOptions) GetEnvVarFiles() []string { } func (o *PathOptions) GetLoadingPrecedence() []string { + if o.IsExplicitFile() { + return []string{o.GetExplicitFile()} + } + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { return envVarFiles } - return []string{o.GlobalFile} } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index b0672291a4..901ed50c42 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -304,6 +304,10 @@ func (rules *ClientConfigLoadingRules) Migrate() error { // GetLoadingPrecedence implements ConfigAccess func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + if 
len(rules.ExplicitPath) > 0 { + return []string{rules.ExplicitPath} + } + return rules.Precedence } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 483f0cba40..3f6b898e37 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -188,12 +188,11 @@ type LeaderElector struct { clock clock.Clock metrics leaderMetricsAdapter - - // name is the name of the resource lock for debugging - name string } -// Run starts the leader election loop +// Run starts the leader election loop. Run will not return +// before leader election loop is stopped by ctx or it has +// stopped holding the leader lease func (le *LeaderElector) Run(ctx context.Context) { defer runtime.HandleCrash() defer func() { @@ -210,7 +209,8 @@ func (le *LeaderElector) Run(ctx context.Context) { } // RunOrDie starts a client with the provided config or panics if the config -// fails to validate. +// fails to validate. RunOrDie blocks until leader election loop is +// stopped by ctx or it has stopped holding the leader lease func RunOrDie(ctx context.Context, lec LeaderElectionConfig) { le, err := NewLeaderElector(lec) if err != nil { @@ -240,7 +240,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { defer cancel() succeeded := false desc := le.config.Lock.Describe() - klog.Infof("attempting to acquire leader lease %v...", desc) + klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { succeeded = le.tryAcquireOrRenew(ctx) le.maybeReportTransition() diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 8c28026bad..ceb76b9cbe 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -52,13 +52,14 @@ func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byt if cml.cm.Annotations == nil { cml.cm.Annotations = make(map[string]string) } - recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey] + recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey] + recordBytes := []byte(recordStr) if found { - if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { + if err := json.Unmarshal(recordBytes, &record); err != nil { return nil, nil, err } } - return &record, []byte(recordBytes), nil + return &record, recordBytes, nil } // Create attempts to create a LeaderElectionRecord annotation diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index d11e43e338..20b4c94d99 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -47,13 +47,14 @@ func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte if el.e.Annotations == nil { el.e.Annotations = make(map[string]string) } - recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey] + recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey] + recordBytes := []byte(recordStr) if found { - if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { + if err := json.Unmarshal(recordBytes, 
&record); err != nil { return nil, nil, err } } - return &record, []byte(recordBytes), nil + return &record, recordBytes, nil } // Create attempts to create a LeaderElectionRecord annotation diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 74630a31ff..c5d7ae3c57 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -19,6 +19,9 @@ package resourcelock import ( "context" "fmt" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -140,3 +143,16 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf return nil, fmt.Errorf("Invalid lock-type %s", lockType) } } + +// NewFromKubeconfig will create a lock of a given type according to the input parameters. +func NewFromKubeconfig(lockType string, ns string, name string, rlc ResourceLockConfig, kubeconfig *restclient.Config, renewDeadline time.Duration) (Interface, error) { + // shallow copy, do not modify the kubeconfig + config := *kubeconfig + timeout := ((renewDeadline / time.Millisecond) / 2) * time.Millisecond + if timeout < time.Second { + timeout = time.Second + } + config.Timeout = timeout + leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "leader-election")) + return New(lockType, ns, name, leaderElectionClient.CoreV1(), leaderElectionClient.CoordinationV1(), rlc) +} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index fce4410c0d..48ef45bb5b 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -270,7 +270,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. } - klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err) return false } diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go index 2ff444ea88..0b3f344a97 100644 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -27,17 +27,29 @@ import ( // thrown away in this case. type FakeRecorder struct { Events chan string + + IncludeObject bool +} + +func objectString(object runtime.Object, includeObject bool) string { + if !includeObject { + return "" + } + return fmt.Sprintf(" involvedObject{kind=%s,apiVersion=%s}", + object.GetObjectKind().GroupVersionKind().Kind, + object.GetObjectKind().GroupVersionKind().GroupVersion(), + ) } func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) } } func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) 
+ objectString(object, f.IncludeObject) } } diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index fa2afb1f16..5fe768ed5e 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -44,7 +44,7 @@ type tlsCacheKey struct { insecure bool caData string certData string - keyData string + keyData string `datapolicy:"security-key"` certFile string keyFile string serverName string diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 45db248647..0704748317 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -35,10 +35,10 @@ type Config struct { // Username and password for basic authentication Username string - Password string + Password string `datapolicy:"password"` // Bearer token for authentication - BearerToken string + BearerToken string `datapolicy:"token"` // Path to a file containing a BearerToken. // If set, the contents are periodically read. @@ -70,7 +70,7 @@ type Config struct { // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) - // Proxy is the the proxy func to be used for all requests made by this + // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy // returns a nil *URL, no proxy is used. // @@ -108,7 +108,7 @@ func (c *Config) HasCertAuth() bool { return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0) } -// HasCertCallbacks returns whether the configuration has certificate callback or not. +// HasCertCallback returns whether the configuration has certificate callback or not. func (c *Config) HasCertCallback() bool { return c.TLS.GetCert != nil } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index f4cfadbd3d..056bc023c5 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -146,6 +146,7 @@ type userAgentRoundTripper struct { rt http.RoundTripper } +// NewUserAgentRoundTripper will add User-Agent header to a request unless it has already been set. func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { return &userAgentRoundTripper{agent, rt} } @@ -167,7 +168,7 @@ func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { retur type basicAuthRoundTripper struct { username string - password string + password string `datapolicy:"password"` rt http.RoundTripper } @@ -260,7 +261,7 @@ func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTr return &bearerAuthRoundTripper{bearer, nil, rt} } -// NewBearerAuthRoundTripper adds the provided bearer token to a request +// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. // If tokenFile is non-empty, it is periodically read, // and the last successfully read content is used as the bearer token. 
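// Editorial aside, not part of the vendored diff: a sketch combining the new
// resourcelock.NewFromKubeconfig helper with the clarified RunOrDie contract
// from the leaderelection hunks earlier in this diff. Namespace, lock name and
// timings are illustrative values, not library defaults.
package example

import (
	"context"
	"time"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func runElected(ctx context.Context, cfg *rest.Config, id string, onStarted func(context.Context)) error {
	renewDeadline := 10 * time.Second
	lock, err := resourcelock.NewFromKubeconfig(
		resourcelock.LeasesResourceLock, // Lease objects are the usual lock type
		"tekton-pipelines",              // namespace (assumption)
		"example-controller",            // lock name (assumption)
		resourcelock.ResourceLockConfig{Identity: id},
		cfg,
		renewDeadline,
	)
	if err != nil {
		return err
	}
	// Per the updated docs above, RunOrDie blocks until ctx is cancelled or
	// the leader lease is lost.
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: renewDeadline,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: onStarted,
			OnStoppedLeading: func() { klog.Info("lost leadership") },
		},
	})
	return nil
}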
@@ -305,7 +306,7 @@ func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { retu // requestInfo keeps track of information about a request/response combination type requestInfo struct { - RequestHeaders http.Header + RequestHeaders http.Header `datapolicy:"token"` RequestVerb string RequestURL string diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 2c0bc9b5c4..1f1c3b4c02 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -28,6 +28,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" codegennamer "k8s.io/code-generator/pkg/namer" + genutil "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -279,7 +280,7 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust // Create a map from "old GV" to "new GV" so we know what changes we need to make. changes := make(map[clientgentypes.GroupVersion]clientgentypes.GroupVersion) for gv, inputDir := range customArgs.GroupVersionPackages() { - p := universe.Package(inputDir) + p := universe.Package(genutil.Vendorless(inputDir)) if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { newGV := clientgentypes.GroupVersion{ Group: clientgentypes.Group(override[0]), diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index ebd8506ec9..9efe024fca 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -411,7 +411,7 @@ var createSubresourceTemplate = ` func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$), &$.resultType|raw${}) - $else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + $else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ if obj == nil { return nil, err } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index 2eaa7cc930..dd2c9cceb9 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -89,13 +89,6 @@ func packageForInternalInterfaces(base string) string { return filepath.Join(base, "internalinterfaces") } -func vendorless(p string) string { - if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { - return p[pos+len("/vendor/"):] - } - return p -} - // Packages makes the client package definition. 
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() @@ -122,7 +115,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat internalGroupVersions := make(map[string]clientgentypes.GroupVersions) groupGoNames := make(map[string]string) for _, inputDir := range arguments.InputDirs { - p := context.Universe.Package(vendorless(inputDir)) + p := context.Universe.Package(genutil.Vendorless(inputDir)) objectMeta, internal, err := objectMetaForPackage(p) if err != nil { diff --git a/vendor/k8s.io/code-generator/pkg/util/build.go b/vendor/k8s.io/code-generator/pkg/util/build.go index 6ea8f52ee0..6ed1fe497c 100644 --- a/vendor/k8s.io/code-generator/pkg/util/build.go +++ b/vendor/k8s.io/code-generator/pkg/util/build.go @@ -59,3 +59,11 @@ func hasSubdir(root, dir string) (rel string, ok bool) { func BoilerplatePath() string { return path.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt") } + +// Vendorless trims vendor prefix from a package path to make it canonical +func Vendorless(p string) string { + if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { + return p[pos+len("/vendor/"):] + } + return p +} diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS index 1227b2b9e8..82f0814ccd 100644 --- a/vendor/knative.dev/pkg/apis/OWNERS +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -1,7 +1,9 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- apis-approvers +- mattmoor +- vaikas +- n3wscott reviewers: -- apis-reviewers +- whaught diff --git a/vendor/knative.dev/pkg/apis/condition_set.go b/vendor/knative.dev/pkg/apis/condition_set.go index 1d81355b36..1b110475ff 100644 --- a/vendor/knative.dev/pkg/apis/condition_set.go +++ b/vendor/knative.dev/pkg/apis/condition_set.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// Conditions is the interface for a Resource that implements the getter and +// ConditionsAccessor is the interface for a Resource that implements the getter and // setter for accessing a Condition collection. // +k8s:deepcopy-gen=true type ConditionsAccessor interface { diff --git a/vendor/knative.dev/pkg/apis/condition_types.go b/vendor/knative.dev/pkg/apis/condition_types.go index 5b42886897..f0517527d6 100644 --- a/vendor/knative.dev/pkg/apis/condition_types.go +++ b/vendor/knative.dev/pkg/apis/condition_types.go @@ -52,7 +52,7 @@ const ( ConditionSeverityInfo ConditionSeverity = "Info" ) -// Conditions defines a readiness condition for a Knative resource. +// Condition defines a readiness condition for a Knative resource. // See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties // +k8s:deepcopy-gen=true type Condition struct { diff --git a/vendor/knative.dev/pkg/apis/deprecated.go b/vendor/knative.dev/pkg/apis/deprecated.go index fc9bca5f1c..fb73306faa 100644 --- a/vendor/knative.dev/pkg/apis/deprecated.go +++ b/vendor/knative.dev/pkg/apis/deprecated.go @@ -33,7 +33,7 @@ func CheckDeprecated(ctx context.Context, obj interface{}) *FieldError { return CheckDeprecatedUpdate(ctx, obj, nil) } -// CheckDeprecated checks whether the provided named deprecated fields +// CheckDeprecatedUpdate checks whether the provided named deprecated fields // are set in a context where deprecation is disallowed. // This is a json shallow check. We will recursively check inlined structs. 
func CheckDeprecatedUpdate(ctx context.Context, obj, original interface{}) *FieldError { diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS index 8df611ba00..f6267f14fe 100644 --- a/vendor/knative.dev/pkg/apis/duck/OWNERS +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -1,7 +1,8 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- apis-duck-approvers +- mattmoor +- vaikas reviewers: -- apis-duck-reviewers +- whaught diff --git a/vendor/knative.dev/pkg/apis/duck/patch.go b/vendor/knative.dev/pkg/apis/duck/patch.go index 8fe57b8889..d548001600 100644 --- a/vendor/knative.dev/pkg/apis/duck/patch.go +++ b/vendor/knative.dev/pkg/apis/duck/patch.go @@ -19,8 +19,8 @@ package duck import ( "encoding/json" + jsonmergepatch "github.com/evanphx/json-patch/v5" jsonpatch "gomodules.xyz/jsonpatch/v2" - jsonmergepatch "gopkg.in/evanphx/json-patch.v4" ) func marshallBeforeAfter(before, after interface{}) ([]byte, []byte, error) { diff --git a/vendor/knative.dev/pkg/apis/duck/register.go b/vendor/knative.dev/pkg/apis/duck/register.go index 93cf55d7fc..34f13d6a28 100644 --- a/vendor/knative.dev/pkg/apis/duck/register.go +++ b/vendor/knative.dev/pkg/apis/duck/register.go @@ -21,6 +21,7 @@ import ( ) const ( + // GroupName is the name of the API group. GroupName = ducktypes.GroupName // AddressableDuckVersionLabel is the label we use to declare diff --git a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go index 27a0a262b0..a0b169d6f8 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go @@ -19,6 +19,7 @@ package v1 import ( "context" "fmt" + "strings" "knative.dev/pkg/apis" ) @@ -41,7 +42,13 @@ type KReference struct { Name string `json:"name"` // API version of the referent. - APIVersion string `json:"apiVersion"` + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Group of the API, without the version of the group. This can be used as an alternative to the APIVersion, and then resolved using ResolveGroup. + // Note: This API is EXPERIMENTAL and might break anytime. For more details: https://github.com/knative/eventing/issues/5086 + // +optional + Group string `json:"group,omitempty"` } func (kr *KReference) Validate(ctx context.Context) *apis.FieldError { @@ -54,8 +61,25 @@ func (kr *KReference) Validate(ctx context.Context) *apis.FieldError { if kr.Name == "" { errs = errs.Also(apis.ErrMissingField("name")) } - if kr.APIVersion == "" { - errs = errs.Also(apis.ErrMissingField("apiVersion")) + if isKReferenceGroupAllowed(ctx) { + if kr.APIVersion == "" && kr.Group == "" { + errs = errs.Also(apis.ErrMissingField("apiVersion")). 
+ Also(apis.ErrMissingField("group")) + } + if kr.APIVersion != "" && kr.Group != "" && !strings.HasPrefix(kr.APIVersion, kr.Group) { + errs = errs.Also(&apis.FieldError{ + Message: "both apiVersion and group are specified and they refer to different API groups", + Paths: []string{"apiVersion", "group"}, + Details: "Only one of them must be specified", + }) + } + } else { + if kr.Group != "" { + errs = errs.Also(apis.ErrDisallowedFields("group")) + } + if kr.APIVersion == "" { + errs = errs.Also(apis.ErrMissingField("apiVersion")) + } } if kr.Kind == "" { errs = errs.Also(apis.ErrMissingField("kind")) @@ -86,3 +110,17 @@ func (kr *KReference) SetDefaults(ctx context.Context) { kr.Namespace = apis.ParentMeta(ctx).Namespace } } + +type isGroupAllowed struct{} + +func isKReferenceGroupAllowed(ctx context.Context) bool { + return ctx.Value(isGroupAllowed{}) != nil +} + +// KReferenceGroupAllowed notes on the context that further validation +// should allow the KReference.Group, which is disabled by default. +// Note: This API is EXPERIMENTAL and may disappear once the KReference.Group feature will stabilize. +// For more details: https://github.com/knative/eventing/issues/5086 +func KReferenceGroupAllowed(ctx context.Context) context.Context { + return context.WithValue(ctx, isGroupAllowed{}, struct{}{}) +} diff --git a/vendor/knative.dev/pkg/apis/duck/v1/register.go b/vendor/knative.dev/pkg/apis/duck/v1/register.go index 67418b3e91..fb4348a058 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/register.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/register.go @@ -38,8 +38,10 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder builds a scheme with the types known to the package. SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme adds the types known to this package to an existing schema. + AddToScheme = SchemeBuilder.AddToScheme ) // Adds the list of known types to Scheme. diff --git a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go index f4da660471..ae98f04688 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/addressable_types.go b/vendor/knative.dev/pkg/apis/duck/v1alpha1/addressable_types.go deleted file mode 100644 index 328246f6f6..0000000000 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/addressable_types.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "knative.dev/pkg/apis" - "knative.dev/pkg/apis/duck" - v1 "knative.dev/pkg/apis/duck/v1" - "knative.dev/pkg/apis/duck/v1beta1" -) - -// +genduck - -// Addressable provides a generic mechanism for a custom resource -// definition to indicate a destination for message delivery. -// -// Addressable is the schema for the destination information. This is -// typically stored in the object's `status`, as this information may -// be generated by the controller. -type Addressable struct { - v1beta1.Addressable `json:",omitempty"` - - Hostname string `json:"hostname,omitempty"` -} - -var ( - // Addressable is an Implementable "duck type". - _ duck.Implementable = (*Addressable)(nil) - // Addressable is a Convertible type. - _ apis.Convertible = (*Addressable)(nil) -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AddressableType is a skeleton type wrapping Addressable in the manner we expect -// resource writers defining compatible resources to embed it. We will -// typically use this type to deserialize Addressable ObjectReferences and -// access the Addressable data. This is not a real resource. -type AddressableType struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Status AddressStatus `json:"status"` -} - -// AddressStatus shows how we expect folks to embed Addressable in -// their Status field. -type AddressStatus struct { - Address *Addressable `json:"address,omitempty"` -} - -var ( - // Verify AddressableType resources meet duck contracts. - _ duck.Populatable = (*AddressableType)(nil) - _ apis.Listable = (*AddressableType)(nil) -) - -// GetFullType implements duck.Implementable -func (*Addressable) GetFullType() duck.Populatable { - return &AddressableType{} -} - -// ConvertTo implements apis.Convertible -func (a *Addressable) ConvertTo(ctx context.Context, to apis.Convertible) error { - var url *apis.URL - if a.URL != nil { - url = a.URL - } else if a.Hostname != "" { - u := a.GetURL() - url = &u - } - switch sink := to.(type) { - case *v1.Addressable: - sink.URL = url.DeepCopy() - return nil - case *v1beta1.Addressable: - sink.URL = url.DeepCopy() - return nil - default: - return fmt.Errorf("unknown version, got: %T", to) - } -} - -// ConvertFrom implements apis.Convertible -func (a *Addressable) ConvertFrom(ctx context.Context, from apis.Convertible) error { - switch source := from.(type) { - case *v1.Addressable: - a.URL = source.URL.DeepCopy() - if a.URL != nil { - a.Hostname = a.URL.Host - } - return nil - case *v1beta1.Addressable: - a.URL = source.URL.DeepCopy() - if a.URL != nil { - a.Hostname = a.URL.Host - } - return nil - default: - return fmt.Errorf("unknown version, got: %T", from) - } -} - -// Populate implements duck.Populatable -func (t *AddressableType) Populate() { - t.Status = AddressStatus{ - &Addressable{ - // Populate ALL fields - Addressable: v1beta1.Addressable{ - URL: &apis.URL{ - Scheme: "http", - Host: "foo.bar.svc.cluster.local", - }, - }, - Hostname: "this is not empty", - }, - } -} - -// GetURL returns the URL type for the Addressable. 
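// Editorial aside, not part of the vendored diff: a sketch of opting in to the
// experimental KReference.Group field validated in the knative_reference.go
// hunk earlier in this diff. The helper function and its argument names are
// invented for illustration.
package example

import (
	"context"

	duckv1 "knative.dev/pkg/apis/duck/v1"
)

func validateKReference(ref *duckv1.KReference, allowGroup bool) error {
	ctx := context.Background()
	if allowGroup {
		// Without this, a populated Group field is rejected with
		// apis.ErrDisallowedFields("group"), per the validation above.
		ctx = duckv1.KReferenceGroupAllowed(ctx)
	}
	if fe := ref.Validate(ctx); fe != nil {
		return fe
	}
	return nil
}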
-func (a Addressable) GetURL() apis.URL { - if a.URL != nil { - return *a.URL - } - return apis.URL{ - Scheme: "http", - Host: a.Hostname, - } -} - -// GetListType implements apis.Listable -func (*AddressableType) GetListType() runtime.Object { - return &AddressableTypeList{} -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AddressableTypeList is a list of AddressableType resources -type AddressableTypeList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []AddressableType `json:"items"` -} diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go b/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go deleted file mode 100644 index 168439686c..0000000000 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "knative.dev/pkg/apis" - "knative.dev/pkg/apis/duck" - "knative.dev/pkg/tracker" -) - -// +genduck -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding is a duck type that specifies the partial schema to which all -// Binding implementations should adhere. -type Binding struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec BindingSpec `json:"spec"` -} - -// Verify that Binding implements the appropriate interfaces. -var ( - _ duck.Implementable = (*Binding)(nil) - _ duck.Populatable = (*Binding)(nil) - _ apis.Listable = (*Binding)(nil) -) - -// BindingSpec specifies the spec portion of the Binding partial-schema. -type BindingSpec struct { - // Subject references the resource(s) whose "runtime contract" should be - // augmented by Binding implementations. - Subject tracker.Reference `json:"subject"` -} - -// GetFullType implements duck.Implementable -func (*Binding) GetFullType() duck.Populatable { - return &Binding{} -} - -// Populate implements duck.Populatable -func (t *Binding) Populate() { - t.Spec = BindingSpec{ - Subject: tracker.Reference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Namespace: "default", - // Name and Selector are mutually exclusive, - // but we fill them both in for this test. 
- Name: "bazinga", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "foo": "bar", - "baz": "blah", - }, - }, - }, - } -} - -// GetListType implements apis.Listable -func (*Binding) GetListType() runtime.Object { - return &BindingList{} -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// BindingList is a list of Binding resources -type BindingList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Binding `json:"items"` -} diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/legacy_targetable_types.go b/vendor/knative.dev/pkg/apis/duck/v1alpha1/legacy_targetable_types.go deleted file mode 100644 index c8a32a7350..0000000000 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/legacy_targetable_types.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "knative.dev/pkg/apis" - "knative.dev/pkg/apis/duck" -) - -// +genduck - -// LegacyTargetable left around until we migrate to Addressable in the -// dependent resources. Addressable has more structure in the way it -// defines the fields. LegacyTargetable only assumed a single string -// in the Status field and we're moving towards defining proper structs -// under Status rather than strings. -// This is to support existing resources until they migrate. -// -// Do not use this for anything new, use Addressable -// -// LegacyTargetable is the old schema for the addressable portion -// of the payload -// -// For new resources use Addressable. -type LegacyTargetable struct { - DomainInternal string `json:"domainInternal,omitempty"` -} - -// LegacyTargetable is an Implementable "duck type". -var _ duck.Implementable = (*LegacyTargetable)(nil) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LegacyTarget is a skeleton type wrapping LegacyTargetable in the manner we -// want to support unless they get migrated into supporting Legacy. -// We will typically use this type to deserialize LegacyTargetable -// ObjectReferences and access the LegacyTargetable data. This is not a -// real resource. -// ** Do not use this for any new resources ** -type LegacyTarget struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Status LegacyTargetable `json:"status"` -} - -// In order for LegacyTargetable to be Implementable, LegacyTarget must be Populatable. 
-var _ duck.Populatable = (*LegacyTarget)(nil) - -// Ensure LegacyTarget satisfies apis.Listable -var _ apis.Listable = (*LegacyTarget)(nil) - -// GetFullType implements duck.Implementable -func (*LegacyTargetable) GetFullType() duck.Populatable { - return &LegacyTarget{} -} - -// Populate implements duck.Populatable -func (t *LegacyTarget) Populate() { - t.Status = LegacyTargetable{ - // Populate ALL fields - DomainInternal: "this is not empty", - } -} - -// GetListType implements apis.Listable -func (*LegacyTarget) GetListType() runtime.Object { - return &LegacyTargetList{} -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LegacyTargetList is a list of LegacyTarget resources -type LegacyTargetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []LegacyTarget `json:"items"` -} diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/retired_targetable_types.go b/vendor/knative.dev/pkg/apis/duck/v1alpha1/retired_targetable_types.go deleted file mode 100644 index d8afb324a4..0000000000 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/retired_targetable_types.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "knative.dev/pkg/apis" - "knative.dev/pkg/apis/duck" -) - -// +genduck - -// Targetable is an earlier version of the Callable interface. -// Callable is a higher-level interface which implements Addressable -// but further promises that the destination may synchronously return -// response messages in reply to a message. -// -// Targetable implementations should instead implement Addressable and -// include an `eventing.knative.dev/returns=any` annotation. -// -// Targetable is retired; implement Addressable for now. -type Targetable struct { - DomainInternal string `json:"domainInternal,omitempty"` -} - -// Targetable is an Implementable "duck type". -var _ duck.Implementable = (*Targetable)(nil) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Target is a skeleton type wrapping Targetable in the manner we expect -// resource writers defining compatible resources to embed it. We will -// typically use this type to deserialize Targetable ObjectReferences and -// access the Targetable data. This is not a real resource. -type Target struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Status TargetStatus `json:"status"` -} - -// TargetStatus shows how we expect folks to embed Targetable in -// their Status field. -type TargetStatus struct { - Targetable *Targetable `json:"targetable,omitempty"` -} - -var ( - // In order for Targetable to be Implementable, Target must be Populatable. 
- _ duck.Populatable = (*Target)(nil) - - // Ensure Target satisfies apis.Listable - _ apis.Listable = (*Target)(nil) -) - -// GetFullType implements duck.Implementable -func (*Targetable) GetFullType() duck.Populatable { - return &Target{} -} - -// Populate implements duck.Populatable -func (t *Target) Populate() { - t.Status = TargetStatus{ - &Targetable{ - // Populate ALL fields - DomainInternal: "this is not empty", - }, - } -} - -// GetListType implements apis.Listable -func (*Target) GetListType() runtime.Object { - return &TargetList{} -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TargetList is a list of Target resources -type TargetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Target `json:"items"` -} diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 7718f84faf..0000000000 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,373 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { - *out = *in - if in.Address != nil { - in, out := &in.Address, &out.Address - *out = new(Addressable) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressStatus. -func (in *AddressStatus) DeepCopy() *AddressStatus { - if in == nil { - return nil - } - out := new(AddressStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Addressable) DeepCopyInto(out *Addressable) { - *out = *in - in.Addressable.DeepCopyInto(&out.Addressable) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addressable. -func (in *Addressable) DeepCopy() *Addressable { - if in == nil { - return nil - } - out := new(Addressable) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AddressableType) DeepCopyInto(out *AddressableType) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableType. 
-func (in *AddressableType) DeepCopy() *AddressableType { - if in == nil { - return nil - } - out := new(AddressableType) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AddressableType) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AddressableType, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableTypeList. -func (in *AddressableTypeList) DeepCopy() *AddressableTypeList { - if in == nil { - return nil - } - out := new(AddressableTypeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AddressableTypeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Binding) DeepCopyInto(out *Binding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. -func (in *Binding) DeepCopy() *Binding { - if in == nil { - return nil - } - out := new(Binding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Binding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BindingList) DeepCopyInto(out *BindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Binding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingList. -func (in *BindingList) DeepCopy() *BindingList { - if in == nil { - return nil - } - out := new(BindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BindingSpec) DeepCopyInto(out *BindingSpec) { - *out = *in - in.Subject.DeepCopyInto(&out.Subject) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingSpec. 
-func (in *BindingSpec) DeepCopy() *BindingSpec { - if in == nil { - return nil - } - out := new(BindingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LegacyTarget) DeepCopyInto(out *LegacyTarget) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTarget. -func (in *LegacyTarget) DeepCopy() *LegacyTarget { - if in == nil { - return nil - } - out := new(LegacyTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LegacyTarget) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LegacyTargetList) DeepCopyInto(out *LegacyTargetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LegacyTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTargetList. -func (in *LegacyTargetList) DeepCopy() *LegacyTargetList { - if in == nil { - return nil - } - out := new(LegacyTargetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LegacyTargetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LegacyTargetable) DeepCopyInto(out *LegacyTargetable) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTargetable. -func (in *LegacyTargetable) DeepCopy() *LegacyTargetable { - if in == nil { - return nil - } - out := new(LegacyTargetable) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Target) DeepCopyInto(out *Target) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target. -func (in *Target) DeepCopy() *Target { - if in == nil { - return nil - } - out := new(Target) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Target) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TargetList) DeepCopyInto(out *TargetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Target, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetList. -func (in *TargetList) DeepCopy() *TargetList { - if in == nil { - return nil - } - out := new(TargetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TargetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { - *out = *in - if in.Targetable != nil { - in, out := &in.Targetable, &out.Targetable - *out = new(Targetable) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus. -func (in *TargetStatus) DeepCopy() *TargetStatus { - if in == nil { - return nil - } - out := new(TargetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Targetable) DeepCopyInto(out *Targetable) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Targetable. -func (in *Targetable) DeepCopy() *Targetable { - if in == nil { - return nil - } - out := new(Targetable) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go index fe0f5d171d..4e369b8721 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go @@ -136,10 +136,7 @@ func (dest *Destination) GetRef() *corev1.ObjectReference { if dest.Ref != nil { return dest.Ref } - if ref := dest.deprecatedObjectReference(); ref != nil { - return ref - } - return nil + return dest.deprecatedObjectReference() } func validateDestinationRef(ref corev1.ObjectReference) *apis.FieldError { diff --git a/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go index 160eff56b0..9ac1f16d6e 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go @@ -38,8 +38,10 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder builds a scheme with the types known to the package. SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme adds the types known to this package to an existing schema. + AddToScheme = SchemeBuilder.AddToScheme ) // Adds the list of known types to Scheme. 
diff --git a/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go index d22c6724ac..ad5486454e 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/apis/volatile_time.go b/vendor/knative.dev/pkg/apis/volatile_time.go index b114294bf9..12c12c5cc6 100644 --- a/vendor/knative.dev/pkg/apis/volatile_time.go +++ b/vendor/knative.dev/pkg/apis/volatile_time.go @@ -28,6 +28,8 @@ import ( // Thus differing VolatileTime values are not considered different. // Note, go-cmp will still return inequality, see unit test if you // need this behavior for go-cmp. +// +// +kubebuilder:validation:Type=string type VolatileTime struct { Inner metav1.Time `json:",inline"` } diff --git a/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go index e2b84acd27..1e060d9e00 100644 --- a/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/client/injection/kube/client/client.go b/vendor/knative.dev/pkg/client/injection/kube/client/client.go index b7994cb725..2c7be6d313 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/client/client.go +++ b/vendor/knative.dev/pkg/client/injection/kube/client/client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go index 27b95afa69..981a47b4a3 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go index 7cc5ac316e..08c3ac2bba 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/deployment.go b/vendor/knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/deployment.go index 066e592abe..de9ff5c8c6 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/deployment.go +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/namespace/namespace.go b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/namespace/namespace.go index f8a43dcc6b..0029809035 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/namespace/namespace.go +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/namespace/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/factory/factory.go b/vendor/knative.dev/pkg/client/injection/kube/informers/factory/factory.go index 7b9e9ab940..c1c136755a 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/informers/factory/factory.go +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/factory/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/fake_filtered_factory.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/fake_filtered_factory.go index 153261dc42..1e747f4b7b 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/fake_filtered_factory.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/fake_filtered_factory.go @@ -109,10 +109,6 @@ func init() { func withInformerFactory(ctx {{.contextContext|raw}}) {{.contextContext|raw}} { c := {{.clientGet|raw}}(ctx) - opts := []{{.informersSharedInformerOption|raw}}{} - if {{.injectionHasNamespace|raw}}(ctx) { - opts = append(opts, {{.informersWithNamespace|raw}}({{.injectionGetNamespace|raw}}(ctx))) - } untyped := ctx.Value({{.factoryLabelKey|raw}}{}) if untyped == nil { {{.loggingFromContext|raw}}(ctx).Panic( @@ -120,11 +116,15 @@ func withInformerFactory(ctx {{.contextContext|raw}}) {{.contextContext|raw}} { } labelSelectors := untyped.([]string) for _, selector := range labelSelectors { - thisOpts := append(opts, {{.informersWithTweakListOptions|raw}}(func(l *{{.metav1ListOptions|raw}}) { + opts := []{{.informersSharedInformerOption|raw}}{} + if {{.injectionHasNamespace|raw}}(ctx) { + opts = append(opts, {{.informersWithNamespace|raw}}({{.injectionGetNamespace|raw}}(ctx))) + } + opts = append(opts, {{.informersWithTweakListOptions|raw}}(func(l *{{.metav1ListOptions|raw}}) { l.LabelSelector = selector })) ctx = context.WithValue(ctx, {{.factoryKey|raw}}{Selector: selector}, - {{.informersNewSharedInformerFactoryWithOptions|raw}}(c, {{.controllerGetResyncPeriod|raw}}(ctx), thisOpts...)) + {{.informersNewSharedInformerFactoryWithOptions|raw}}(c, {{.controllerGetResyncPeriod|raw}}(ctx), opts...)) } return ctx } diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/filtered_factory.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/filtered_factory.go index b036cf1cf6..58e65432f5 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/filtered_factory.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/filtered_factory.go @@ -111,10 +111,6 @@ func WithSelectors(ctx {{.contextContext|raw}}, selector ...string) context.Cont func withInformerFactory(ctx {{.contextContext|raw}}) {{.contextContext|raw}} { c := {{.cachingClientGet|raw}}(ctx) - opts := []{{.informersSharedInformerOption|raw}}{} - if {{.injectionHasNamespace|raw}}(ctx) { - opts = append(opts, {{.informersWithNamespace|raw}}({{.injectionGetNamespace|raw}}(ctx))) - } untyped := ctx.Value(LabelKey{}) if untyped == nil { {{.loggingFromContext|raw}}(ctx).Panic( @@ -122,11 +118,15 @@ func withInformerFactory(ctx {{.contextContext|raw}}) {{.contextContext|raw}} { } labelSelectors := untyped.([]string) for _, selector := range labelSelectors { - thisOpts := append(opts, {{.informersWithTweakListOptions|raw}}(func(l *{{.metav1ListOptions|raw}}) { + opts := []{{.informersSharedInformerOption|raw}}{} + if {{.injectionHasNamespace|raw}}(ctx) { + opts = append(opts, {{.informersWithNamespace|raw}}({{.injectionGetNamespace|raw}}(ctx))) + } + opts = append(opts, {{.informersWithTweakListOptions|raw}}(func(l *{{.metav1ListOptions|raw}}) { l.LabelSelector = selector })) ctx = context.WithValue(ctx, Key{Selector: selector}, - {{.informersNewSharedInformerFactoryWithOptions|raw}}(c, {{.controllerGetResyncPeriod|raw}}(ctx), thisOpts...)) + {{.informersNewSharedInformerFactoryWithOptions|raw}}(c, {{.controllerGetResyncPeriod|raw}}(ctx), opts...)) } return ctx } 
diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go index 9471b5db8a..c97f8108dd 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go @@ -217,6 +217,8 @@ func NewImpl(ctx {{.contextContext|raw}}, r Interface{{if .hasClass}}, classValu lister := {{.type|lowercaseSingular}}Informer.Lister() + var promoteFilterFunc func(obj interface{}) bool + rec := &reconcilerImpl{ LeaderAwareFuncs: {{.reconcilerLeaderAwareFuncs|raw}}{ PromoteFunc: func(bkt {{.reconcilerBucket|raw}}, enq func({{.reconcilerBucket|raw}}, {{.typesNamespacedName|raw}})) error { @@ -225,7 +227,11 @@ func NewImpl(ctx {{.contextContext|raw}}, r Interface{{if .hasClass}}, classValu return err } for _, elt := range all { - // TODO: Consider letting users specify a filter in options. + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } enq(bkt, {{.typesNamespacedName|raw}}{ Namespace: elt.GetNamespace(), Name: elt.GetName(), @@ -274,6 +280,9 @@ func NewImpl(ctx {{.contextContext|raw}}, r Interface{{if .hasClass}}, classValu if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } } rec.Recorder = createRecorder(ctx, agentName) diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go index cc6757da8a..50602cd0bf 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go @@ -111,6 +111,7 @@ func (g *reconcilerReconcilerGenerator) GenerateType(c *generator.Context, t *ty "reconcilerReconcilerEvent": c.Universe.Type(types.Name{Package: "knative.dev/pkg/reconciler", Name: "ReconcilerEvent"}), "reconcilerRetryUpdateConflicts": c.Universe.Function(types.Name{Package: "knative.dev/pkg/reconciler", Name: "RetryUpdateConflicts"}), "reconcilerConfigStore": c.Universe.Type(types.Name{Name: "ConfigStore", Package: "knative.dev/pkg/reconciler"}), + "reconcilerOnDeletionInterface": c.Universe.Type(types.Name{Package: "knative.dev/pkg/reconciler", Name: "OnDeletionInterface"}), // Deps "clientsetInterface": c.Universe.Type(types.Name{Name: "Interface", Package: g.clientsetPkg}), "resourceLister": c.Universe.Type(types.Name{Name: g.listerName, Package: g.listerPkg}), @@ -166,7 +167,6 @@ func (g *reconcilerReconcilerGenerator) GenerateType(c *generator.Context, t *ty Name: "SafeDiff", }), "fmtErrorf": c.Universe.Package("fmt").Function("Errorf"), - "reflectDeepEqual": c.Universe.Package("reflect").Function("DeepEqual"), "equalitySemantic": c.Universe.Package("k8s.io/apimachinery/pkg/api/equality").Variable("Semantic"), "jsonMarshal": c.Universe.Package("encoding/json").Function("Marshal"), "typesMergePatchType": c.Universe.Package("k8s.io/apimachinery/pkg/types").Constant("MergePatchType"), @@ -210,6 +210,14 @@ func (g *reconcilerReconcilerGenerator) GenerateType(c *generator.Context, t *ty Package: "knative.dev/pkg/reconciler", Name: "DoObserveFinalizeKind", }), + "controllerIsSkipKey": c.Universe.Function(types.Name{ + Package: "knative.dev/pkg/controller", + Name: "IsSkipKey", + }), + 
"controllerIsRequeueKey": c.Universe.Function(types.Name{ + Package: "knative.dev/pkg/controller", + Name: "IsRequeueKey", + }), } sw.Do(reconcilerInterfaceFactory, m) @@ -270,13 +278,13 @@ type doReconcile func(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcil // reconcilerImpl implements controller.Reconciler for {{.type|raw}} resources. type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement {{.reconcilerLeaderAware|raw}} + // LeaderAwareFuncs is inlined to help us implement {{.reconcilerLeaderAware|raw}}. {{.reconcilerLeaderAwareFuncs|raw}} // Client is used to write back status updates. Client {{.clientsetInterface|raw}} - // Listers index properties about resources + // Listers index properties about resources. Lister {{.resourceLister|raw}} // Recorder is an event recorder for recording Event resources to the @@ -305,7 +313,7 @@ type reconcilerImpl struct { {{end}} } -// Check that our Reconciler implements controller.Reconciler +// Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. var _ {{.reconcilerLeaderAware|raw}} = (*reconcilerImpl)(nil) @@ -410,8 +418,15 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro original, err := getter.Get(s.name) if {{.apierrsIsNotFound|raw}}(err) { - // The resource may no longer exist, in which case we stop processing. + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.({{.reconcilerOnDeletionInterface|raw}}); ok { + return del.ObserveDeletion(ctx, {{.typesNamespacedName|raw}}{ + Namespace: s.namespace, + Name: s.name, + }) + } return nil } else if err != nil { return err @@ -501,7 +516,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro var event *{{.reconcilerReconcilerEvent|raw}} if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { @@ -510,8 +525,14 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro return nil } - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, {{.corev1EventTypeWarning|raw}}, "InternalError", reconcileEvent.Error()) + if {{ .controllerIsSkipKey|raw }}(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := {{ .controllerIsRequeueKey|raw }}(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, {{.corev1EventTypeWarning|raw}}, "InternalError", reconcileEvent.Error()) + } return reconcileEvent } @@ -537,7 +558,7 @@ func (r *reconcilerImpl) updateStatus(ctx {{.contextContext|raw}}, existing *{{. } // If there's nothing to update, just return. 
- if {{.reflectDeepEqual|raw}}(existing.Status, desired.Status) { + if {{.equalitySemantic|raw}}.DeepEqual(existing.Status, desired.Status) { return nil } diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go index fb14cd8996..db8334b406 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go @@ -99,28 +99,28 @@ func (g *reconcilerStateGenerator) GenerateType(c *generator.Context, t *types.T var reconcilerStateType = ` // state is used to track the state of a reconciler in a single run. type state struct { - // Key is the original reconciliation key from the queue. + // key is the original reconciliation key from the queue. key string - // Namespace is the namespace split from the reconciliation key. + // namespace is the namespace split from the reconciliation key. namespace string - // Namespace is the name split from the reconciliation key. + // name is the name split from the reconciliation key. name string // reconciler is the reconciler. reconciler Interface - // rof is the read only interface cast of the reconciler. + // roi is the read only interface cast of the reconciler. roi ReadOnlyInterface - // IsROI (Read Only Interface) the reconciler only observes reconciliation. + // isROI (Read Only Interface) the reconciler only observes reconciliation. isROI bool // rof is the read only finalizer cast of the reconciler. rof ReadOnlyFinalizer - // IsROF (Read Only Finalizer) the reconciler only observes finalize. + // isROF (Read Only Finalizer) the reconciler only observes finalize. isROF bool - // IsLeader the instance of the reconciler is the elected leader. + // isLeader the instance of the reconciler is the elected leader. isLeader bool } func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name + // Convert the namespace/name string into a distinct namespace and name. namespace, name, err := {{.cacheSplitMetaNamespaceKey|raw}}(key) if err != nil { return nil, {{.fmtErrorf|raw}}("invalid resource key: %s", key) diff --git a/vendor/knative.dev/pkg/configmap/OWNERS b/vendor/knative.dev/pkg/configmap/OWNERS index 2480fc6d43..c3ff67e25b 100644 --- a/vendor/knative.dev/pkg/configmap/OWNERS +++ b/vendor/knative.dev/pkg/configmap/OWNERS @@ -1,4 +1,6 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- configmap-approvers +- pkg-configmap-writers +reviewers: +- pkg-configmap-reviewers diff --git a/vendor/knative.dev/pkg/configmap/manual_watcher.go b/vendor/knative.dev/pkg/configmap/manual_watcher.go index bd28e7c231..a2760b4478 100644 --- a/vendor/knative.dev/pkg/configmap/manual_watcher.go +++ b/vendor/knative.dev/pkg/configmap/manual_watcher.go @@ -44,7 +44,7 @@ func (w *ManualWatcher) Watch(name string, o ...Observer) { w.observers[name] = append(w.observers[name], o...) 
} -// Watch implements Watcher +// ForEach implements Watcher func (w *ManualWatcher) ForEach(f func(string, []Observer) error) error { for k, v := range w.observers { if err := f(k, v); err != nil { diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS index 0b270d53af..9c7a432ba6 100644 --- a/vendor/knative.dev/pkg/controller/OWNERS +++ b/vendor/knative.dev/pkg/controller/OWNERS @@ -1,7 +1,7 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- controller-approvers +- pkg-controller-writers reviewers: -- controller-reviewers +- pkg-controller-reviewers diff --git a/vendor/knative.dev/pkg/controller/controller.go b/vendor/knative.dev/pkg/controller/controller.go index 747cf58efc..7ac98b2e0e 100644 --- a/vendor/knative.dev/pkg/controller/controller.go +++ b/vendor/knative.dev/pkg/controller/controller.go @@ -57,6 +57,7 @@ var ( // when processing the controller's workqueue. Controller binaries // may adjust this process-wide default. For finer control, invoke // Run on the controller directly. + // TODO rename the const to Concurrency and deprecated this DefaultThreadsPerController = 2 ) @@ -203,6 +204,9 @@ type Impl struct { // which are not required to complete at the highest priority. workQueue *twoLaneQueue + // Concurrency - The number of workers to use when processing the controller's workqueue. + Concurrency int + // Sugared logger is easier to use but is not as performant as the // raw logger. In performance critical paths, call logger.Desugar() // and use the returned raw logger instead. In addition to the @@ -221,6 +225,7 @@ type ControllerOptions struct { //nolint // for backcompat. Logger *zap.SugaredLogger Reporter StatsReporter RateLimiter workqueue.RateLimiter + Concurrency int } // NewImpl instantiates an instance of our controller that will feed work to the @@ -244,12 +249,16 @@ func NewImplFull(r Reconciler, options ControllerOptions) *Impl { if options.Reporter == nil { options.Reporter = MustNewStatsReporter(options.WorkQueueName, options.Logger) } + if options.Concurrency == 0 { + options.Concurrency = DefaultThreadsPerController + } return &Impl{ Name: options.WorkQueueName, Reconciler: r, workQueue: newTwoLaneWorkQueue(options.WorkQueueName, options.RateLimiter), logger: options.Logger, statsReporter: options.Reporter, + Concurrency: options.Concurrency, } } @@ -477,7 +486,8 @@ func (c *Impl) RunContext(ctx context.Context, threadiness int) error { return nil } -// DEPRECATED use RunContext instead. +// Run runs the controller. +// DEPRECATED: Use RunContext instead. func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error { // Create a context that is cancelled when the stopCh is called. ctx, cancel := context.WithCancel(context.Background()) @@ -545,6 +555,12 @@ func (c *Impl) handleErr(err error, key types.NamespacedName, startTime time.Tim c.workQueue.Forget(key) return } + if ok, delay := IsRequeueKey(err); ok { + c.workQueue.AddAfter(key, delay) + c.logger.Debugf("Requeuing key %s (by request) after %v (depth: %d)", safeKey(key), delay, c.workQueue.Len()) + return + } + c.logger.Errorw("Reconcile error", zap.Duration("duration", time.Since(startTime)), zap.Error(err)) // Re-queue the key if it's a transient error. @@ -656,6 +672,49 @@ func (err permanentError) Unwrap() error { return err.e } +// NewRequeueImmediately returns a new instance of requeueKeyError. +// Users can return this type of error to immediately requeue a key. 
+func NewRequeueImmediately() error { + return requeueKeyError{} +} + +// NewRequeueAfter returns a new instance of requeueKeyError. +// Users can return this type of error to requeue a key after a delay. +func NewRequeueAfter(dur time.Duration) error { + return requeueKeyError{duration: dur} +} + +// requeueKeyError is an error that indicates the reconciler wants to reprocess +// the key after a particular duration (possibly zero). +// We should re-queue keys with the desired duration when this is returned by Reconcile. +type requeueKeyError struct { + duration time.Duration +} + +var _ error = requeueKeyError{} + +// Error implements the Error() interface of error. +func (err requeueKeyError) Error() string { + return fmt.Sprintf("requeue after: %s", err.duration) +} + +// IsRequeueKey returns true if the given error is a requeueKeyError. +func IsRequeueKey(err error) (bool, time.Duration) { + rqe := requeueKeyError{} + if errors.As(err, &rqe) { + return true, rqe.duration + } + return false, 0 +} + +// Is implements the Is() interface of error. It returns whether the target +// error can be treated as equivalent to a requeueKeyError. +func (requeueKeyError) Is(target error) bool { + //nolint: errorlint // This check is actually fine. + _, ok := target.(requeueKeyError) + return ok +} + // Informer is the group of methods that a type must implement to be passed to // StartInformers. type Informer interface { @@ -722,9 +781,10 @@ func StartAll(ctx context.Context, controllers ...*Impl) { // Start all of the controllers. for _, ctrlr := range controllers { wg.Add(1) + concurrency := ctrlr.Concurrency go func(c *Impl) { defer wg.Done() - c.RunContext(ctx, DefaultThreadsPerController) + c.RunContext(ctx, concurrency) }(ctrlr) } wg.Wait() diff --git a/vendor/knative.dev/pkg/controller/options.go b/vendor/knative.dev/pkg/controller/options.go index 8d61835e69..4eef0eb5e7 100644 --- a/vendor/knative.dev/pkg/controller/options.go +++ b/vendor/knative.dev/pkg/controller/options.go @@ -38,6 +38,14 @@ type Options struct { // DemoteFunc configures the demote function this reconciler uses DemoteFunc func(b reconciler.Bucket) + + // Concurrency - The number of workers to use when processing the controller's workqueue. + Concurrency int + + // PromoteFilterFunc filters the objects that are enqueued when the reconciler is promoted to leader. + // Objects that pass the filter (return true) will be reconciled when a new leader is promoted. + // If no filter is specified, all objects will be reconciled. + PromoteFilterFunc func(obj interface{}) bool } // OptionsFn is a callback method signature that accepts an Impl and returns diff --git a/vendor/knative.dev/pkg/controller/stats_reporter.go b/vendor/knative.dev/pkg/controller/stats_reporter.go index 474bab3fc6..6735285db4 100644 --- a/vendor/knative.dev/pkg/controller/stats_reporter.go +++ b/vendor/knative.dev/pkg/controller/stats_reporter.go @@ -48,7 +48,9 @@ var ( // - characters are printable US-ASCII reconcilerTagKey = tag.MustNewKey("reconciler") successTagKey = tag.MustNewKey("success") - NamespaceTagKey = tag.MustNewKey(metricskey.LabelNamespaceName) + + // NamespaceTagKey marks metrics with a namespace. 
+ NamespaceTagKey = tag.MustNewKey(metricskey.LabelNamespaceName) ) func init() { diff --git a/vendor/knative.dev/pkg/environment/client_config.go b/vendor/knative.dev/pkg/environment/client_config.go new file mode 100644 index 0000000000..04d4220b0a --- /dev/null +++ b/vendor/knative.dev/pkg/environment/client_config.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package environment + +import ( + "flag" + "fmt" + "math" + "os" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// ClientConfig holds the information about the environment and can be configured with flags +type ClientConfig struct { + Cluster string // K8s cluster (defaults to cluster in kubeconfig) + ServerURL string // ServerURL - The address of the Kubernetes API server. Overrides any value in kubeconfig. + Burst int // Burst - Maximum burst for throttle. + QPS float64 // QPS - Maximum QPS to the server from the client. + Kubeconfig string // Kubeconfig - Path to a kubeconfig. Current casing is present for backwards compatibility +} + +func (c *ClientConfig) InitFlags(fs *flag.FlagSet) { + fs.StringVar(&c.Cluster, "cluster", "", "Defaults to the current cluster in kubeconfig.") + + fs.StringVar(&c.ServerURL, "server", "", + "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + + fs.StringVar(&c.Kubeconfig, "kubeconfig", os.Getenv("KUBECONFIG"), + "Path to a kubeconfig. Only required if out-of-cluster.") + + fs.IntVar(&c.Burst, "kube-api-burst", 0, "Maximum burst for throttle.") + + fs.Float64Var(&c.QPS, "kube-api-qps", 0, "Maximum QPS to the server from the client.") +} + +func (c *ClientConfig) GetRESTConfig() (*rest.Config, error) { + if c.Burst < 0 { + return nil, fmt.Errorf("provided burst value %d must be > 0", c.Burst) + } + if c.QPS < 0 || c.QPS > math.MaxFloat32 { + return nil, fmt.Errorf("provided QPS value %f must be >0 and <3.4+e38", c.QPS) + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := &clientcmd.ConfigOverrides{} + + if c.Kubeconfig != "" { + loadingRules.ExplicitPath = c.Kubeconfig + } + if c.Cluster != "" { + overrides.Context = clientcmdapi.Context{Cluster: c.Cluster} + } else if c.ServerURL != "" { + overrides.ClusterInfo = clientcmdapi.Cluster{Server: c.ServerURL} + } + + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loadingRules, + overrides, + ).ClientConfig() + + if err != nil { + return nil, fmt.Errorf("failed to create client config: %w", err) + } + + config.QPS = float32(c.QPS) + config.Burst = c.Burst + + return config, nil +} diff --git a/vendor/knative.dev/pkg/hash/OWNERS b/vendor/knative.dev/pkg/hash/OWNERS index 0b270d53af..9c7a432ba6 100644 --- a/vendor/knative.dev/pkg/hash/OWNERS +++ b/vendor/knative.dev/pkg/hash/OWNERS @@ -1,7 +1,7 @@ # The OWNERS file is used by prow to automatically merge approved PRs. 
approvers: -- controller-approvers +- pkg-controller-writers reviewers: -- controller-reviewers +- pkg-controller-reviewers diff --git a/vendor/knative.dev/pkg/injection/README.md b/vendor/knative.dev/pkg/injection/README.md index 761677e88c..0373d53a2c 100644 --- a/vendor/knative.dev/pkg/injection/README.md +++ b/vendor/knative.dev/pkg/injection/README.md @@ -388,6 +388,28 @@ impl := kindreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Op }) ``` +### Filtering on controller promotion + +The generated controllers implement the +[LeaderAwareFuncs](https://github.com/knative/pkg/blob/main/reconciler/leader.go#L66-L72) +interface to support HA controller deployments. When a generated controller is +promoted, by default it will trigger [a re-reconcile of every resource it +manages](https://github.com/knative/pkg/blob/main/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/controller.go#L68-L81). +To filter which objects get reconciled, pass a `PromoteFilterFunc` to the +controller's constructor: + +```go +kindreconciler "knative.dev//pkg/client/injection/reconciler///" +pkgreconciler "knative.dev/pkg/reconciler" +... +impl := kindreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options { + return controller.Options{ + PromoteFilterFunc: pkgreconciler.LabelFilterFunc("mylabel", "myvalue", false), + } +}) +``` + + ### Artifacts The artifacts are targeted to the configured `client/injection` directory: diff --git a/vendor/knative.dev/pkg/injection/config.go b/vendor/knative.dev/pkg/injection/config.go index 6bcd385507..f54f84b451 100644 --- a/vendor/knative.dev/pkg/injection/config.go +++ b/vendor/knative.dev/pkg/injection/config.go @@ -17,106 +17,34 @@ limitations under the License. package injection import ( - "errors" "flag" "log" - "math" - "os" - "os/user" - "path/filepath" - "sync" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" + "knative.dev/pkg/environment" ) -// Environment holds the config for flag based config. -type Environment struct { - // ServerURL - The address of the Kubernetes API server. Overrides any value in kubeconfig. - ServerURL string - // Kubeconfig - Path to a kubeconfig. - Kubeconfig string // Note: named Kubeconfig because of legacy reasons vs KubeConfig. - // Burst - Maximum burst for throttle. - Burst int - // QPS - Maximum QPS to the server from the client. - QPS float64 -} - -var ( - env *Environment - once sync.Once -) - -func Flags() *Environment { - once.Do(func() { - env = new(Environment) - - flag.StringVar(&env.ServerURL, "server", "", - "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") - - flag.StringVar(&env.Kubeconfig, "kubeconfig", os.Getenv("KUBECONFIG"), - "Path to a kubeconfig. Only required if out-of-cluster.") - - flag.IntVar(&env.Burst, "kube-api-burst", 0, "Maximum burst for throttle.") - - flag.Float64Var(&env.QPS, "kube-api-qps", 0, "Maximum QPS to the server from the client.") - }) - return env -} - // ParseAndGetRESTConfigOrDie parses the rest config flags and creates a client or // dies by calling log.Fatalf. 
func ParseAndGetRESTConfigOrDie() *rest.Config { - env := Flags() - + env := new(environment.ClientConfig) + env.InitFlags(flag.CommandLine) klog.InitFlags(flag.CommandLine) flag.Parse() - if env.Burst < 0 { - log.Fatal("Invalid burst value", env.Burst) - } - if env.QPS < 0 || env.QPS > math.MaxFloat32 { - log.Fatal("Invalid QPS value", env.QPS) - } - - cfg, err := GetRESTConfig(env.ServerURL, env.Kubeconfig) + cfg, err := env.GetRESTConfig() if err != nil { log.Fatal("Error building kubeconfig: ", err) } - - cfg.Burst = env.Burst - cfg.QPS = float32(env.QPS) - return cfg } // GetRESTConfig returns a rest.Config to be used for kubernetes client creation. -// It does so in the following order: -// 1. Use the passed kubeconfig/serverURL. -// 2. Fallback to the KUBECONFIG environment variable. -// 3. Fallback to in-cluster config. -// 4. Fallback to the ~/.kube/config. +// Deprecated: use environment.ClientConfig package func GetRESTConfig(serverURL, kubeconfig string) (*rest.Config, error) { - // If we have an explicit indication of where the kubernetes config lives, read that. - if kubeconfig != "" { - c, err := clientcmd.BuildConfigFromFlags(serverURL, kubeconfig) - if err != nil { - return nil, err - } - return c, nil + env := environment.ClientConfig{ + Kubeconfig: kubeconfig, + ServerURL: serverURL, } - - // If not, try the in-cluster config. - if c, err := rest.InClusterConfig(); err == nil { - return c, nil - } - - // If no in-cluster config, try the default location in the user's home directory. - if usr, err := user.Current(); err == nil { - if c, err := clientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { - return c, nil - } - } - - return nil, errors.New("could not create a valid kubeconfig") + return env.GetRESTConfig() } diff --git a/vendor/knative.dev/pkg/injection/sharedmain/main.go b/vendor/knative.dev/pkg/injection/sharedmain/main.go index b2067bb1be..8cc6fb1955 100644 --- a/vendor/knative.dev/pkg/injection/sharedmain/main.go +++ b/vendor/knative.dev/pkg/injection/sharedmain/main.go @@ -57,17 +57,6 @@ func init() { maxprocs.Set() } -// GetConfig returns a rest.Config to be used for kubernetes client creation. -// It does so in the following order: -// 1. Use the passed kubeconfig/serverURL. -// 2. Fallback to the KUBECONFIG environment variable. -// 3. Fallback to in-cluster config. -// 4. Fallback to the ~/.kube/config. -// Deprecated: use injection.GetRESTConfig -func GetConfig(serverURL, kubeconfig string) (*rest.Config, error) { - return injection.GetRESTConfig(serverURL, kubeconfig) -} - // GetLoggingConfig gets the logging config from either the file system if present // or via reading a configMap from the API. // The context is expected to be initialized with injection. @@ -178,6 +167,10 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto logger, atomicLevel := SetupLoggerOrDie(ctx, component) defer flush(logger) ctx = logging.WithLogger(ctx, logger) + + // Override client-go's warning handler to give us nicely printed warnings. + rest.SetDefaultWarningHandler(&logging.WarningHandler{Logger: logger}) + profilingHandler := profiling.NewHandler(logger, false) profilingServer := profiling.NewServer(profilingHandler) @@ -253,13 +246,6 @@ func flush(logger *zap.SugaredLogger) { metrics.FlushExporter() } -// ParseAndGetConfigOrDie parses the rest config flags and creates a client or -// dies by calling log.Fatalf. 
-// Deprecated: use injection.ParseAndGetRESTConfigOrDie -func ParseAndGetConfigOrDie() *rest.Config { - return injection.ParseAndGetRESTConfigOrDie() -} - // SetupLoggerOrDie sets up the logger using the config from the given context // and returns a logger and atomic level, or dies by calling log.Fatalf. func SetupLoggerOrDie(ctx context.Context, component string) (*zap.SugaredLogger, zap.AtomicLevel) { diff --git a/vendor/knative.dev/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/kmeta/OWNERS index 29b0d9f256..a1e0f822aa 100644 --- a/vendor/knative.dev/pkg/kmeta/OWNERS +++ b/vendor/knative.dev/pkg/kmeta/OWNERS @@ -1,4 +1,5 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- kmeta-approvers +- mattmoor +- vagababov diff --git a/vendor/knative.dev/pkg/leaderelection/config.go b/vendor/knative.dev/pkg/leaderelection/config.go index 3d9df1dc79..8d785c522d 100644 --- a/vendor/knative.dev/pkg/leaderelection/config.go +++ b/vendor/knative.dev/pkg/leaderelection/config.go @@ -25,7 +25,6 @@ import ( "github.com/kelseyhightower/envconfig" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -79,11 +78,6 @@ type Config struct { LeaseDuration time.Duration RenewDeadline time.Duration RetryPeriod time.Duration - - // This field is deprecated and will be removed once downstream - // repositories have removed their validation of it. - // TODO(https://github.com/knative/pkg/issues/1478): Remove this field. - EnabledComponents sets.String } func (c *Config) GetComponentConfig(name string) ComponentConfig { diff --git a/vendor/knative.dev/pkg/logging/OWNERS b/vendor/knative.dev/pkg/logging/OWNERS index fa4854ba0a..f51b91be77 100644 --- a/vendor/knative.dev/pkg/logging/OWNERS +++ b/vendor/knative.dev/pkg/logging/OWNERS @@ -1,4 +1,6 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- logging-approvers +- mdemirhan +- n3wscott +- yanweiguo diff --git a/vendor/knative.dev/pkg/logging/warning_handler.go b/vendor/knative.dev/pkg/logging/warning_handler.go new file mode 100644 index 0000000000..7ea7aeeff6 --- /dev/null +++ b/vendor/knative.dev/pkg/logging/warning_handler.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +import "go.uber.org/zap" + +// WarningHandler is a warning handler to forward client-go's warning logs into a zap logger. +type WarningHandler struct { + Logger *zap.SugaredLogger +} + +func (h *WarningHandler) HandleWarningHeader(code int, agent string, message string) { + // This condition is copied from K8s' default WarningLogger. 
+ if code != 299 || len(message) == 0 { + return + } + + h.Logger.Warn("API Warning: " + message) +} diff --git a/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go index 53345202cb..79d26075a5 100644 --- a/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/metrics/OWNERS index 6d3966df44..7cd699f27e 100644 --- a/vendor/knative.dev/pkg/metrics/OWNERS +++ b/vendor/knative.dev/pkg/metrics/OWNERS @@ -1,4 +1,10 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- metrics-approvers +- mdemirhan +- yanweiguo + +reviewers: +- mdemirhan +- yanweiguo +- skonto diff --git a/vendor/knative.dev/pkg/metrics/README.md b/vendor/knative.dev/pkg/metrics/README.md index bbbf2084ed..411094b662 100644 --- a/vendor/knative.dev/pkg/metrics/README.md +++ b/vendor/knative.dev/pkg/metrics/README.md @@ -1,15 +1,14 @@ # Common metrics export interfaces for Knative -_Note that this directory is currently in transition. See [the Plan](#the-plan) -for details on where this is heading._ +See [the Plan](#the-plan) for details on where this is heading. ## Current status The code currently uses OpenCensus to support exporting metrics to multiple -backends. Currently, two backends are supported: Prometheus and Stackdriver. +backends. Currently, two backends are supported: Prometheus and OpenCensus/OTel. Metrics export is controlled by a ConfigMap called `config-observability` which -is a key-value map with specific values supported for each of the Stackdriver +is a key-value map with specific values supported for each of the OpenCensus and Prometheus backends. Hot-reload of the ConfigMap on a running process is supported by directly watching (via the Kubernetes API) the `config-observability` object. Configuration via environment is also supported @@ -20,9 +19,7 @@ namespace. There are currently [6 supported Golang exporters for OpenCensus](https://opencensus.io/exporters/supported-exporters/go/). -At least the Stackdriver exporter causes problems/failures if started without -access to (Google) application default credentials. It's not clear that we want -to build all of those backends into the core of `knative.dev/pkg` and all +We do not want to build all of those backends into the core of `knative.dev/pkg` and all downstream dependents, and we'd like all the code shipped in `knative.dev/pkg` to be able to be tested without needing any special environment setup. @@ -89,19 +86,22 @@ statistics for a short period of time if not. ### Steps to reach the goal -- [ ] [Add OpenCensus Agent as one of the export options](https://github.com/knative/pkg/issues/955). -- [ ] Ensure that all tests pass in a non-Google-Cloud connected environment. +- [x] [Add OpenCensus Agent as one of the export options](https://github.com/knative/pkg/issues/955). +- [x] Ensure that all tests pass in a non-Google-Cloud connected environment. 
**This is true today.** [Ensure this on an ongoing basis.](https://github.com/knative/pkg/issues/957) -- [ ] Google to implement OpenCensus Agent configuration to match what they are +- [x] Google to implement OpenCensus Agent configuration to match what they are doing for Stackdriver now. (No public issue link because this should be in Google's vendor-specific configuration.) -- [ ] Document how to configure OpenCensus/OpenTelemetry Agent + Prometheus to +- [x] Document how to configure OpenCensus/OpenTelemetry Agent + Prometheus to achieve the current level of application visibility, and determine a long-term course for how to maintain this as a "bare minimum" supported - configuration. -- [ ] Stop adding exporter features outside of the OpenCensus / OpenTelemetry + configuration. https://github.com/knative/docs/pull/3005 +- [x] Stop adding exporter features outside of the OpenCensus / OpenTelemetry export as of 0.13 release (03 March 2020). Between now and 0.13, small amounts of additional features can be built in to assist with the bridging process or to support existing products. New products should build on the OpenCensus Agent approach. +- [x] Removal of the Stackdriver OpenCensus Exporter + https://github.com/knative/pkg/issues/2173 +- [ ] Revisit Adopting OpenTelemetry SDKs instead of OpenCensus diff --git a/vendor/knative.dev/pkg/metrics/config.go b/vendor/knative.dev/pkg/metrics/config.go index 6407a1b0de..aa7002344b 100644 --- a/vendor/knative.dev/pkg/metrics/config.go +++ b/vendor/knative.dev/pkg/metrics/config.go @@ -43,17 +43,9 @@ const ( // The following keys are used to configure metrics reporting. // See https://github.com/knative/serving/blob/main/config/config-observability.yaml // for details. - allowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics" - collectorAddressKey = "metrics.opencensus-address" - collectorSecureKey = "metrics.opencensus-require-tls" - reportingPeriodKey = "metrics.reporting-period-seconds" - - // Stackdriver client configuration keys - stackdriverClusterNameKey = "metrics.stackdriver-cluster-name" - stackdriverCustomMetricSubDomainKey = "metrics.stackdriver-custom-metrics-subdomain" - stackdriverGCPLocationKey = "metrics.stackdriver-gcp-location" - stackdriverProjectIDKey = "metrics.stackdriver-project-id" - stackdriverUseSecretKey = "metrics.stackdriver-use-secret" + collectorAddressKey = "metrics.opencensus-address" + collectorSecureKey = "metrics.opencensus-require-tls" + reportingPeriodKey = "metrics.reporting-period-seconds" defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" defaultPrometheusPort = 9090 @@ -66,14 +58,12 @@ const ( var ( // TestOverrideBundleCount is a variable for testing to reduce the size (number of metrics) buffered before - // Stackdriver will send a bundled metric report. Only applies if non-zero. + // OpenCensus will send a bundled metric report. Only applies if non-zero. TestOverrideBundleCount = 0 ) // Metrics backend "enum". const ( - // stackdriver is used for Stackdriver backend - stackdriver metricsBackend = "stackdriver" // prometheus is used for Prometheus backend prometheus metricsBackend = "prometheus" // openCensus is used to export to the OpenCensus Agent / Collector, @@ -104,6 +94,7 @@ type metricsConfig struct { // ---- OpenCensus specific below ---- // collectorAddress is the address of the collector, if not `localhost:55678` collectorAddress string + // Require mutual TLS. Defaults to "false" because mutual TLS is hard to set up. 
requireSecure bool @@ -115,52 +106,6 @@ type metricsConfig struct { // prometheusHost is the host where the metrics are exposed in Prometheus // format. It defaults to "0.0.0.0" prometheusHost string - - // ---- Stackdriver specific below ---- - // True if backendDestination equals to "stackdriver". Store this in a variable - // to reduce string comparison operations. - isStackdriverBackend bool - // stackdriverMetricTypePrefix is the metric domain joins component, e.g. - // "knative.dev/serving/activator". Store this in a variable to reduce string - // join operations. - stackdriverMetricTypePrefix string - // stackdriverCustomMetricTypePrefix is "custom.googleapis.com" joined with the subdomain and component. - // E.g., "custom.googleapis.com//". - // Store this in a variable to reduce string join operations. - stackdriverCustomMetricTypePrefix string - // stackdriverClientConfig is the metadata to configure the metrics exporter's Stackdriver client. - stackdriverClientConfig StackdriverClientConfig -} - -// StackdriverClientConfig encapsulates the metadata required to configure a Stackdriver client. -type StackdriverClientConfig struct { - // ProjectID is the stackdriver project ID to which data is uploaded. - // This is not necessarily the GCP project ID where the Kubernetes cluster is hosted. - // Required when the Kubernetes cluster is not hosted on GCE. - ProjectID string - // GCPLocation is the GCP region or zone to which data is uploaded. - // This is not necessarily the GCP location where the Kubernetes cluster is hosted. - // Required when the Kubernetes cluster is not hosted on GCE. - GCPLocation string - // ClusterName is the cluster name with which the data will be associated in Stackdriver. - // Required when the Kubernetes cluster is not hosted on GCE. - ClusterName string - // UseSecret is whether the credentials stored in a Kubernetes Secret should be used to - // authenticate with Stackdriver. The Secret name and namespace can be specified by calling - // metrics.SetStackdriverSecretLocation. - // If UseSecret is false, Google Application Default Credentials - // will be used (https://cloud.google.com/docs/authentication/production). - UseSecret bool -} - -// NewStackdriverClientConfigFromMap creates a stackdriverClientConfig from the given map -func NewStackdriverClientConfigFromMap(config map[string]string) *StackdriverClientConfig { - return &StackdriverClientConfig{ - ProjectID: config[stackdriverProjectIDKey], - GCPLocation: config[stackdriverGCPLocationKey], - ClusterName: config[stackdriverClusterNameKey], - UseSecret: strings.EqualFold(config[stackdriverUseSecretKey], "true"), - } } // record applies the `ros` Options to each measurement in `mss` and then records the resulting @@ -184,7 +129,7 @@ func (mc *metricsConfig) record(ctx context.Context, mss []stats.Measurement, ro return mc.recorder(ctx, mss, ros...) 
} -func createMetricsConfig(ctx context.Context, ops ExporterOptions) (*metricsConfig, error) { +func createMetricsConfig(_ context.Context, ops ExporterOptions) (*metricsConfig, error) { var mc metricsConfig if ops.Domain == "" { @@ -213,7 +158,7 @@ func createMetricsConfig(ctx context.Context, ops ExporterOptions) (*metricsConf } switch lb := metricsBackend(strings.ToLower(backend)); lb { - case stackdriver, prometheus, openCensus, none: + case prometheus, openCensus, none: mc.backendDestination = lb default: return nil, fmt.Errorf("unsupported metrics backend value %q", backend) @@ -252,16 +197,12 @@ func createMetricsConfig(ctx context.Context, ops ExporterOptions) (*metricsConf mc.prometheusPort = pp mc.prometheusHost = prometheusHost() - case stackdriver: - if err := sdinit(ctx, m, &mc, ops); err != nil { - return nil, err - } } // If reporting period is specified, use the value from the configuration. // If not, set a default value based on the selected backend. // Each exporter makes different promises about what the lowest supported - // reporting period is. For Stackdriver, this value is 1 minute. + // reporting period is. For OpenCensus, this value is 1 minute. // For Prometheus, we will use a lower value since the exporter doesn't // push anything but just responds to pull requests, and shorter durations // do not really hurt the performance and we rely on the scraping configuration. @@ -273,7 +214,7 @@ func createMetricsConfig(ctx context.Context, ops ExporterOptions) (*metricsConf mc.reportingPeriod = time.Duration(repInt) * time.Second } else { switch mc.backendDestination { - case stackdriver, openCensus: + case openCensus: mc.reportingPeriod = time.Minute case prometheus: mc.reportingPeriod = 5 * time.Second diff --git a/vendor/knative.dev/pkg/metrics/config_observability.go b/vendor/knative.dev/pkg/metrics/config_observability.go index 2c320bbff2..0a17e0f03b 100644 --- a/vendor/knative.dev/pkg/metrics/config_observability.go +++ b/vendor/knative.dev/pkg/metrics/config_observability.go @@ -65,8 +65,8 @@ type ObservabilityConfig struct { // EnableProbeRequestLog enables queue-proxy to write health check probe request logs. EnableProbeRequestLog bool - // RequestMetricsBackend specifies the request metrics destination, e.g. Prometheus, - // Stackdriver. "None" disables all backends. + // RequestMetricsBackend specifies the request metrics destination, e.g. Prometheus or + // OpenCensus. "None" disables all backends. RequestMetricsBackend string // EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from diff --git a/vendor/knative.dev/pkg/metrics/doc.go b/vendor/knative.dev/pkg/metrics/doc.go index 9869aa83e2..d56809079d 100644 --- a/vendor/knative.dev/pkg/metrics/doc.go +++ b/vendor/knative.dev/pkg/metrics/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package metrics provides Knative utilities for exporting metrics to Stackdriver -// backend or Prometheus backend based on config-observability settings. +// Package metrics provides Knative utilities for exporting metrics to +// OpenCensus/OTel or Prometheus backend based on config-observability +// settings. 
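With Stackdriver gone, the configuration surface for the push backend is essentially the three `metrics.opencensus-*` / reporting-period keys kept in config.go above plus the backend selector. The following is a minimal sketch of pointing a component at a collector through those keys; the `Component` and `ConfigMap` fields on `ExporterOptions` and the `metrics.backend-destination` key are assumed from the surrounding package rather than shown in this hunk, and the collector address is hypothetical.

```go
package main

import (
	"context"

	"go.uber.org/zap"
	"knative.dev/pkg/metrics"
)

func main() {
	logger := zap.NewExample().Sugar()

	// Select the OpenCensus backend and configure it with the keys that
	// survive the Stackdriver removal. Field and key names not visible in
	// the hunk above are assumptions.
	err := metrics.UpdateExporter(context.Background(), metrics.ExporterOptions{
		Domain:    "knative.dev/example",
		Component: "example-controller",
		ConfigMap: map[string]string{
			"metrics.backend-destination":      "opencensus",
			"metrics.opencensus-address":       "otel-collector.metrics.svc:55678", // hypothetical collector address
			"metrics.opencensus-require-tls":   "false",
			"metrics.reporting-period-seconds": "60",
		},
	}, logger)
	if err != nil {
		logger.Fatalw("failed to set up metrics exporter", "error", err)
	}
}
```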
package metrics diff --git a/vendor/knative.dev/pkg/metrics/exporter.go b/vendor/knative.dev/pkg/metrics/exporter.go index 46e5916dec..12b46b6467 100644 --- a/vendor/knative.dev/pkg/metrics/exporter.go +++ b/vendor/knative.dev/pkg/metrics/exporter.go @@ -48,16 +48,14 @@ type flushable interface { Flush() } -type stoppable interface { - // StopMetricsExporter stops the exporter - StopMetricsExporter() -} - // ExporterOptions contains options for configuring the exporter. type ExporterOptions struct { // Domain is the metrics domain. e.g. "knative.dev". Must be present. + + // TODO - using this as a prefix is being discussed here: + // https://github.com/knative/pkg/issues/2174 // - // Stackdriver uses the following format to construct full metric name: + // OpenCensus uses the following format to construct full metric name: // // // Prometheus uses the following format to construct full metric name: // _ @@ -168,8 +166,8 @@ func UpdateExporter(ctx context.Context, ops ExporterOptions, logger *zap.Sugare return err } -// isNewExporterRequired compares the non-nil newConfig against curMetricsConfig. When backend changes, -// or stackdriver project ID changes for stackdriver backend, we need to update the metrics exporter. +// isNewExporterRequired compares the non-nil newConfig against curMetricsConfig. +// When backend changes, we need to update the metrics exporter. // This function must be called with the metricsMux reader (or writer) locked. func isNewExporterRequired(newConfig *metricsConfig) bool { cc := curMetricsConfig @@ -183,7 +181,11 @@ func isNewExporterRequired(newConfig *metricsConfig) bool { return newConfig.collectorAddress != cc.collectorAddress || newConfig.requireSecure != cc.requireSecure } - return newConfig.backendDestination == stackdriver && newConfig.stackdriverClientConfig != cc.stackdriverClientConfig + if newConfig.backendDestination == prometheus { + return newConfig.prometheusHost != cc.prometheusHost || newConfig.prometheusPort != cc.prometheusPort + } + + return false } // newMetricsExporter gets a metrics exporter based on the config. @@ -192,16 +194,9 @@ func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view. // If there is a Prometheus Exporter server running, stop it. resetCurPromSrv() - // TODO(https://github.com/knative/pkg/issues/866): Move Stackdriver and Prometheus - // operations before stopping to an interface. - if se, ok := curMetricsExporter.(stoppable); ok { - se.StopMetricsExporter() - } - factory := map[metricsBackend]func(*metricsConfig, *zap.SugaredLogger) (view.Exporter, ResourceExporterFactory, error){ - stackdriver: newStackdriverExporter, - openCensus: newOpenCensusExporter, - prometheus: newPrometheusExporter, + openCensus: newOpenCensusExporter, + prometheus: newPrometheusExporter, none: func(*metricsConfig, *zap.SugaredLogger) (view.Exporter, ResourceExporterFactory, error) { noneFactory := func(*resource.Resource) (view.Exporter, error) { return &noneExporter{}, nil diff --git a/vendor/knative.dev/pkg/metrics/gcp_metadata.go b/vendor/knative.dev/pkg/metrics/gcp_metadata.go deleted file mode 100644 index 3c1a6a09ad..0000000000 --- a/vendor/knative.dev/pkg/metrics/gcp_metadata.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build !nostackdriver - -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "cloud.google.com/go/compute/metadata" - "knative.dev/pkg/metrics/metricskey" -) - -type gcpMetadata struct { - project string - location string - cluster string -} - -func retrieveGCPMetadata() *gcpMetadata { - gm := gcpMetadata{ - project: metricskey.ValueUnknown, - location: metricskey.ValueUnknown, - cluster: metricskey.ValueUnknown, - } - - if metadata.OnGCE() { - project, err := metadata.NumericProjectID() - if err == nil && project != "" { - gm.project = project - } - location, err := metadata.InstanceAttributeValue("cluster-location") - if err == nil && location != "" { - gm.location = location - } - cluster, err := metadata.InstanceAttributeValue("cluster-name") - if err == nil && cluster != "" { - gm.cluster = cluster - } - } - - return &gm -} diff --git a/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go b/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go deleted file mode 100644 index 7362ad037e..0000000000 --- a/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metricskey - -import "k8s.io/apimachinery/pkg/util/sets" - -// TODO should be moved to eventing. See https://github.com/knative/pkg/issues/608 - -const ( - // ResourceTypeKnativeTrigger is the Stackdriver resource type for Knative Triggers. - ResourceTypeKnativeTrigger = "knative_trigger" - - // ResourceTypeKnativeBroker is the Stackdriver resource type for Knative Brokers. - ResourceTypeKnativeBroker = "knative_broker" - - // ResourceTypeKnativeSource is the Stackdriver resource type for Knative Sources. - ResourceTypeKnativeSource = "knative_source" - - // LabelName is the label for the name of the resource. - LabelName = "name" - - // LabelResourceGroup is the name of the resource CRD. - LabelResourceGroup = "resource_group" - - // LabelTriggerName is the label for the name of the Trigger. - LabelTriggerName = "trigger_name" - - // LabelBrokerName is the label for the name of the Broker. - LabelBrokerName = "broker_name" - - // LabelEventType is the label for the name of the event type. - LabelEventType = "event_type" - - // LabelEventSource is the label for the name of the event source. - LabelEventSource = "event_source" - - // LabelFilterType is the label for the Trigger filter attribute "type". - LabelFilterType = "filter_type" -) - -var ( - // KnativeTriggerLabels stores the set of resource labels for resource type knative_trigger. 
- KnativeTriggerLabels = sets.NewString( - LabelProject, - LabelLocation, - LabelClusterName, - LabelNamespaceName, - LabelBrokerName, - LabelTriggerName, - ) - - // KnativeTriggerMetrics stores a set of metric types which are supported - // by resource type knative_trigger. - KnativeTriggerMetrics = sets.NewString( - "knative.dev/internal/eventing/trigger/event_count", - "knative.dev/internal/eventing/trigger/event_processing_latencies", - "knative.dev/internal/eventing/trigger/event_dispatch_latencies", - ) - - // KnativeBrokerLabels stores the set of resource labels for resource type knative_broker. - KnativeBrokerLabels = sets.NewString( - LabelProject, - LabelLocation, - LabelClusterName, - LabelNamespaceName, - LabelBrokerName, - ) - - // KnativeBrokerMetrics stores a set of metric types which are supported - // by resource type knative_trigger. - KnativeBrokerMetrics = sets.NewString( - "knative.dev/internal/eventing/broker/event_count", - ) - - // KnativeSourceLabels stores the set of resource labels for resource type knative_source. - KnativeSourceLabels = sets.NewString( - LabelProject, - LabelLocation, - LabelClusterName, - LabelNamespaceName, - LabelName, - LabelResourceGroup, - ) - - // KnativeSourceMetrics stores a set of metric types which are supported - // by resource type knative_source. - KnativeSourceMetrics = sets.NewString( - "knative.dev/eventing/source/event_count", - ) -) diff --git a/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go b/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go deleted file mode 100644 index 239cebadfe..0000000000 --- a/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metricskey - -import "k8s.io/apimachinery/pkg/util/sets" - -// TODO should be moved to serving. See https://github.com/knative/pkg/issues/608 - -const ( - // ResourceTypeKnativeRevision is the Stackdriver resource type for Knative revision - ResourceTypeKnativeRevision = "knative_revision" - - // LabelServiceName is the label for the deployed service name - LabelServiceName = "service_name" - - // LabelRouteName is the label for immutable name of the route that receives the request - LabelRouteName = "route_name" - - // LabelConfigurationName is the label for the configuration which created the monitored revision - LabelConfigurationName = "configuration_name" - - // LabelRevisionName is the label for the monitored revision - LabelRevisionName = "revision_name" -) - -var ( - // KnativeRevisionLabels stores the set of resource labels for resource type knative_revision. - // LabelRouteName is added as extra label since it is optional, not in this map. 
- KnativeRevisionLabels = sets.NewString( - LabelProject, - LabelLocation, - LabelClusterName, - LabelNamespaceName, - LabelServiceName, - LabelConfigurationName, - LabelRevisionName, - ) - - // KnativeRevisionMetrics stores a set of metric types which are supported - // by resource type knative_revision. - KnativeRevisionMetrics = sets.NewString( - "knative.dev/internal/serving/activator/request_count", - "knative.dev/internal/serving/activator/request_latencies", - "knative.dev/serving/autoscaler/desired_pods", - "knative.dev/serving/autoscaler/requested_pods", - "knative.dev/serving/autoscaler/actual_pods", - "knative.dev/serving/autoscaler/stable_request_concurrency", - "knative.dev/serving/autoscaler/panic_request_concurrency", - "knative.dev/serving/autoscaler/target_concurrency_per_pod", - "knative.dev/serving/autoscaler/panic_mode", - "knative.dev/internal/serving/revision/request_count", - "knative.dev/internal/serving/revision/request_latencies", - "knative.dev/internal/serving/controller/cert_expiration_durations", - "knative.dev/internal/serving/controller/cert_total_num", - "knative.dev/internal/serving/controller/cert_issuance_latencies", - "knative.dev/internal/serving/controller/cert_creation_count", - ) -) diff --git a/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go deleted file mode 100644 index a875accef7..0000000000 --- a/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go +++ /dev/null @@ -1,434 +0,0 @@ -// +build !nostackdriver - -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "context" - "fmt" - "path" - "strconv" - "sync" - "time" - - sd "contrib.go.opencensus.io/exporter/stackdriver" - "go.opencensus.io/resource" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - "google.golang.org/api/option" - "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/pkg/metrics/metricskey" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -const ( - customMetricTypePrefix = "custom.googleapis.com" - // defaultCustomMetricSubDomain is the default subdomain to use for unsupported metrics by monitored resource types. - // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor - defaultCustomMetricSubDomain = "knative.dev" - // StackdriverSecretNamespaceDefault is the default namespace to search for a k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. - StackdriverSecretNamespaceDefault = "default" - // StackdriverSecretNameDefault is the default name of the k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. - StackdriverSecretNameDefault = "stackdriver-service-account-key" - // secretDataFieldKey is the name of the k8s Secret field that contains the Secret's key. 
- secretDataFieldKey = "key.json" - // stackdriverApiTimeout is the timeout value of Stackdriver API service side. - stackdriverAPITimeout = 12 * time.Second -) - -var ( - // gcpMetadataFunc is the function used to fetch GCP metadata. - // In product usage, this is always set to function retrieveGCPMetadata. - // In unit tests this is set to a fake one to avoid calling GCP metadata - // service. - gcpMetadataFunc func() *gcpMetadata - - // newStackdriverExporterFunc is the function used to create new stackdriver - // exporter. - // In product usage, this is always set to function newOpencensusSDExporter. - // In unit tests this is set to a fake one to avoid calling actual Google API - // service. - newStackdriverExporterFunc func(sd.Options) (view.Exporter, error) - - // kubeclient is the in-cluster Kubernetes kubeclient, which is lazy-initialized on first use. - kubeclient kubernetes.Interface - // initClientOnce is the lazy initializer for kubeclient. - initClientOnce sync.Once - // kubeclientInitErr capture an error during initClientOnce - kubeclientInitErr error - - // stackdriverMtx protects setting secretNamespace and secretName and useStackdriverSecretEnabled - stackdriverMtx sync.RWMutex - // secretName is the name of the k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. - secretName = StackdriverSecretNameDefault - // secretNamespace is the namespace to search for a k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. - secretNamespace = StackdriverSecretNamespaceDefault - // useStackdriverSecretEnabled specifies whether or not the exporter can be configured with a Secret. - // Consuming packages must do explicitly enable this by calling SetStackdriverSecretLocation. - useStackdriverSecretEnabled = false - - // metricToResourceLabels provides a quick lookup from a custom metric name to the set of tags - // which should be promoted to Stackdriver Resource labels via opencensus resources. - metricToResourceLabels = map[string]*resourceTemplate{} -) - -type resourceTemplate struct { - Type string - LabelKeys sets.String -} - -func sdinit(ctx context.Context, m map[string]string, mc *metricsConfig, ops ExporterOptions) error { - // If stackdriverClientConfig is not provided for stackdriver backend destination, OpenCensus will try to - // use the application default credentials. If that is not available, Opencensus would fail to create the - // metrics exporter. - scc := NewStackdriverClientConfigFromMap(m) - mc.stackdriverClientConfig = *scc - mc.isStackdriverBackend = true - var allowCustomMetrics bool - var err error - mc.stackdriverMetricTypePrefix = path.Join(mc.domain, mc.component) - - customMetricsSubDomain := m[stackdriverCustomMetricSubDomainKey] - if customMetricsSubDomain == "" { - customMetricsSubDomain = defaultCustomMetricSubDomain - } - mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, customMetricsSubDomain, mc.component) - if ascmStr := m[allowStackdriverCustomMetricsKey]; ascmStr != "" { - allowCustomMetrics, err = strconv.ParseBool(ascmStr) - if err != nil { - return fmt.Errorf("invalid %s value %q", allowStackdriverCustomMetricsKey, ascmStr) - } - } - - mc.recorder = sdCustomMetricsRecorder(*mc, allowCustomMetrics) - - if scc.UseSecret { - secret, err := getStackdriverSecret(ctx, ops.Secrets) - if err != nil { - return err - } - - mc.secret = secret - } - return nil -} - -// SetStackdriverSecretLocation sets the name and namespace of the Secret that can be used to authenticate with Stackdriver. 
-// The Secret is only used if both: -// 1. This function has been explicitly called to set the name and namespace -// 2. Users set metricsConfig.stackdriverClientConfig.UseSecret to "true" -func SetStackdriverSecretLocation(name string, namespace string) { - stackdriverMtx.Lock() - defer stackdriverMtx.Unlock() - secretName = name - secretNamespace = namespace - useStackdriverSecretEnabled = true -} - -func init() { - // Set gcpMetadataFunc to call GCP metadata service. - gcpMetadataFunc = retrieveGCPMetadata - newStackdriverExporterFunc = newOpencensusSDExporter - - kubeclientInitErr = nil - - metricsToTemplates := []struct { - metrics sets.String - template resourceTemplate - }{ - {metricskey.KnativeRevisionMetrics, resourceTemplate{metricskey.ResourceTypeKnativeRevision, metricskey.KnativeRevisionLabels}}, - {metricskey.KnativeTriggerMetrics, resourceTemplate{metricskey.ResourceTypeKnativeTrigger, metricskey.KnativeTriggerLabels}}, - {metricskey.KnativeBrokerMetrics, resourceTemplate{metricskey.ResourceTypeKnativeBroker, metricskey.KnativeBrokerLabels}}, - {metricskey.KnativeSourceMetrics, resourceTemplate{metricskey.ResourceTypeKnativeSource, metricskey.KnativeSourceLabels}}, - } - - for _, item := range metricsToTemplates { - t := item.template - for k := range item.metrics { - metricToResourceLabels[k] = &t - } - } -} - -type pollOnlySDExporter struct { - internalExporter view.Exporter -} - -var _ (view.Exporter) = (*pollOnlySDExporter)(nil) -var _ (flushable) = (*pollOnlySDExporter)(nil) - -func (e *pollOnlySDExporter) ExportView(viewData *view.Data) { - // Stackdriver will run an internal loop to bundle stats, so ignore this. -} - -func (e *pollOnlySDExporter) Flush() { - if e.internalExporter != nil { - if f, ok := e.internalExporter.(flushable); ok { - f.Flush() - } - } -} - -func (e *pollOnlySDExporter) StopMetricsExporter() { - if e.internalExporter != nil { - if f, ok := e.internalExporter.(stoppable); ok { - f.StopMetricsExporter() - } - } -} - -func newOpencensusSDExporter(o sd.Options) (view.Exporter, error) { - e, err := sd.NewExporter(o) - if err != nil { - return nil, err - } - // Start the exporter. - // TODO(https://github.com/knative/pkg/issues/866): Move this to an interface. 
- if err := e.StartMetricsExporter(); err != nil { - return nil, err - } - return e, nil -} - -func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, ResourceExporterFactory, error) { - gm := getMergedGCPMetadata(config) - mpf := getMetricPrefixFunc(config.stackdriverMetricTypePrefix, config.stackdriverCustomMetricTypePrefix) - co, err := getStackdriverExporterClientOptions(config) - if err != nil { - logger.Warnw("Issue configuring Stackdriver exporter client options, no additional client options will be used: ", zap.Error(err)) - } - - // Automatically fall back on Google application default credentials - e, err := newStackdriverExporterFunc(sd.Options{ - ProjectID: gm.project, - Location: gm.location, - MonitoringClientOptions: co, - TraceClientOptions: co, - GetMetricPrefix: mpf, - ReportingInterval: config.reportingPeriod, - DefaultMonitoringLabels: &sd.Labels{}, - Timeout: stackdriverAPITimeout, - BundleCountThreshold: TestOverrideBundleCount, - }) - if err != nil { - logger.Errorw("Failed to create the Stackdriver exporter: ", zap.Error(err)) - return nil, nil, err - } - logger.Info("Created Opencensus Stackdriver exporter with config ", config) - // We have to return a ResourceExporterFactory here to enable tracking resources, even though we always poll for them. - return &pollOnlySDExporter{e}, - func(r *resource.Resource) (view.Exporter, error) { return &pollOnlySDExporter{}, nil }, - nil -} - -func sdCustomMetricsRecorder(mc metricsConfig, allowCustomMetrics bool) func(context.Context, []stats.Measurement, ...stats.Options) error { - gm := getMergedGCPMetadata(&mc) - metadataMap := map[string]string{ - metricskey.LabelProject: gm.project, - metricskey.LabelLocation: gm.location, - metricskey.LabelClusterName: gm.cluster, - } - return func(ctx context.Context, mss []stats.Measurement, ros ...stats.Options) error { - // Some metrics may be promoted to known Stackdriver schemas, so we may - // end up multiple Resources recorded for a single `RecordBatch` call. - metricsByResource := map[*resourceTemplate][]stats.Measurement{} - - for _, m := range mss { - metricType := path.Join(mc.stackdriverMetricTypePrefix, m.Measure().Name()) - t, ok := metricToResourceLabels[metricType] - if ok || allowCustomMetrics { - if metricsByResource[t] == nil { - metricsByResource[t] = make([]stats.Measurement, 0, len(mss)) - } - metricsByResource[t] = append(metricsByResource[t], m) - } - } - - baseLabels := map[string]string{} - baseResource := metricskey.GetResource(ctx) - if baseResource != nil { - baseLabels = baseResource.Labels - } - tagMap := tag.FromContext(ctx) - for templ, ms := range metricsByResource { - sdResource := baseResource - sdCtx := ctx - if templ != nil { - sdResource = &resource.Resource{ - Type: templ.Type, - Labels: map[string]string{}, - } - tagMutations := make([]tag.Mutator, 0, len(templ.LabelKeys)) - for k := range templ.LabelKeys { - if v, ok := baseLabels[k]; ok { - sdResource.Labels[k] = v - continue - } - tagKey := tag.MustNewKey(k) - if v, ok := tagMap.Value(tagKey); ok { - sdResource.Labels[k] = v - tagMutations = append(tagMutations, tag.Delete(tagKey)) - continue - } - if v, ok := metadataMap[k]; ok { - sdResource.Labels[k] = v - continue - } - sdResource.Labels[k] = metricskey.ValueUnknown - } - var err error - sdCtx, err = tag.New(metricskey.WithResource(ctx, *sdResource), tagMutations...) 
- if err != nil { - return err - } - } - if sdResource != nil { - opt, err := optionForResource(sdResource) - if err != nil { - return err - } - ros = append(ros, opt) - } - if err := stats.RecordWithOptions(sdCtx, append(ros, stats.WithMeasurements(ms...))...); err != nil { - return err - } - } - return nil - } -} - -// getStackdriverExporterClientOptions creates client options for the opencensus Stackdriver exporter from the given stackdriverClientConfig. -// On error, an empty array of client options is returned. -func getStackdriverExporterClientOptions(config *metricsConfig) ([]option.ClientOption, error) { - var co []option.ClientOption - - // SetStackdriverSecretLocation must have been called by calling package for this to work. - if config.stackdriverClientConfig.UseSecret { - if config.secret == nil { - return co, fmt.Errorf("no secret provided for component %q; cannot use stackdriver-use-secret=true", config.component) - } - - if opt, err := convertSecretToExporterOption(config.secret); err == nil { - co = append(co, opt) - } else { - return co, err - } - } - return co, nil -} - -// getMergedGCPMetadata returns GCP metadata required to export metrics -// to Stackdriver. Values can come from the GCE metadata server or the config. -// Values explicitly set in the config take the highest precedent. -func getMergedGCPMetadata(config *metricsConfig) *gcpMetadata { - gm := gcpMetadataFunc() - if config.stackdriverClientConfig.ProjectID != "" { - gm.project = config.stackdriverClientConfig.ProjectID - } - - if config.stackdriverClientConfig.GCPLocation != "" { - gm.location = config.stackdriverClientConfig.GCPLocation - } - - if config.stackdriverClientConfig.ClusterName != "" { - gm.cluster = config.stackdriverClientConfig.ClusterName - } - - return gm -} - -func getMetricPrefixFunc(metricTypePrefix, customMetricTypePrefix string) func(name string) string { - return func(name string) string { - metricType := path.Join(metricTypePrefix, name) - inServing := metricskey.KnativeRevisionMetrics.Has(metricType) - inEventing := metricskey.KnativeBrokerMetrics.Has(metricType) || - metricskey.KnativeTriggerMetrics.Has(metricType) || - metricskey.KnativeSourceMetrics.Has(metricType) - if inServing || inEventing { - return metricTypePrefix - } - // Unsupported metric by knative_revision, use custom domain. - return customMetricTypePrefix - } -} - -// getStackdriverSecret returns the Kubernetes Secret specified in the given config. -// SetStackdriverSecretLocation must have been called by calling package for this to work. 
-// TODO(anniefu): Update exporter if Secret changes (https://github.com/knative/pkg/issues/842) -func getStackdriverSecret(ctx context.Context, secretFetcher SecretFetcher) (*corev1.Secret, error) { - stackdriverMtx.RLock() - defer stackdriverMtx.RUnlock() - - if !useStackdriverSecretEnabled { - return nil, nil - } - - var secErr error - var sec *corev1.Secret - if secretFetcher != nil { - sec, secErr = secretFetcher(fmt.Sprintf("%s/%s", secretNamespace, secretName)) - } else { - // This else-block can be removed once UpdateExporterFromConfigMap is fully deprecated in favor of ConfigMapWatcher - if err := ensureKubeclient(); err != nil { - return nil, err - } - - sec, secErr = kubeclient.CoreV1().Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{}) - } - - if secErr != nil { - return nil, fmt.Errorf("error getting Secret [%v] in namespace [%v]: %w", secretName, secretNamespace, secErr) - } - - return sec, nil -} - -// convertSecretToExporterOption converts a Kubernetes Secret to an OpenCensus Stackdriver Exporter Option. -func convertSecretToExporterOption(secret *corev1.Secret) (option.ClientOption, error) { - if data, ok := secret.Data[secretDataFieldKey]; ok { - return option.WithCredentialsJSON(data), nil - } - return nil, fmt.Errorf("expected Secret to store key in data field named [%s]", secretDataFieldKey) -} - -// ensureKubeclient is the lazy initializer for kubeclient. -func ensureKubeclient() error { - // initClientOnce is only run once and cannot return error, so kubeclientInitErr is used to capture errors. - initClientOnce.Do(func() { - config, err := rest.InClusterConfig() - if err != nil { - kubeclientInitErr = err - return - } - - cs, err := kubernetes.NewForConfig(config) - if err != nil { - kubeclientInitErr = err - return - } - kubeclient = cs - }) - - return kubeclientInitErr -} diff --git a/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go index 822bb4db22..d9024668ea 100644 --- a/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/network/domain.go b/vendor/knative.dev/pkg/network/domain.go index a205b4eece..ee571e0bce 100644 --- a/vendor/knative.dev/pkg/network/domain.go +++ b/vendor/knative.dev/pkg/network/domain.go @@ -26,8 +26,9 @@ import ( ) const ( - resolverFileName = "/etc/resolv.conf" - defaultDomainName = "cluster.local" + resolverFileName = "/etc/resolv.conf" + clusterDomainEnvKey = "CLUSTER_DOMAIN" + defaultDomainName = "cluster.local" ) var ( @@ -55,6 +56,7 @@ func GetClusterDomainName() string { } func getClusterDomainName(r io.Reader) string { + // First look in the conf file. for scanner := bufio.NewScanner(r); scanner.Scan(); { elements := strings.Split(scanner.Text(), " ") if elements[0] != "search" { @@ -66,6 +68,12 @@ func getClusterDomainName(r io.Reader) string { } } } + + // Then look in the ENV. + if domain := os.Getenv(clusterDomainEnvKey); len(domain) > 0 { + return domain + } + // For all abnormal cases return default domain name. 
return defaultDomainName } diff --git a/vendor/knative.dev/pkg/ptr/value.go b/vendor/knative.dev/pkg/ptr/value.go new file mode 100644 index 0000000000..148bf2105c --- /dev/null +++ b/vendor/knative.dev/pkg/ptr/value.go @@ -0,0 +1,91 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import "time" + +// Int32Value is a helper for turning pointers to integers into values for use +// in API types that want int32. +func Int32Value(i *int32) int32 { + if i == nil { + return 0 + } + return *i +} + +// Int64Value is a helper for turning pointers to integers into values for use +// in API types that want int64. +func Int64Value(i *int64) int64 { + if i == nil { + return 0 + } + return *i +} + +// Float32Value is a helper for turning pointers to floats into values for use +// in API types that want float32. +func Float32Value(f *float32) float32 { + if f == nil { + return 0 + } + return *f +} + +// Float64Value is a helper for turning pointers to floats into values for use +// in API types that want float64. +func Float64Value(f *float64) float64 { + if f == nil { + return 0 + } + return *f +} + +// BoolValue is a helper for turning pointers to bools into values for use in +// API types that want bool. +func BoolValue(b *bool) bool { + if b == nil { + return false + } + return *b +} + +// StringValue is a helper for turning pointers to strings into values for use +// in API types that want string. +func StringValue(s *string) string { + if s == nil { + return "" + } + return *s +} + +// DurationValue is a helper for turning *time.Duration into values for use in +// API types that want time.Duration. +func DurationValue(t *time.Duration) time.Duration { + if t == nil { + return 0 + } + return *t +} + +// TimeValue is a helper for turning *time.Time into values for use in API +// types that want API types that want time.Time. +func TimeValue(t *time.Time) time.Time { + if t == nil { + return time.Time{} + } + return *t +} diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS index 0b270d53af..9c7a432ba6 100644 --- a/vendor/knative.dev/pkg/reconciler/OWNERS +++ b/vendor/knative.dev/pkg/reconciler/OWNERS @@ -1,7 +1,7 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- controller-approvers +- pkg-controller-writers reviewers: -- controller-reviewers +- pkg-controller-reviewers diff --git a/vendor/knative.dev/pkg/reconciler/configstore.go b/vendor/knative.dev/pkg/reconciler/configstore.go index 80b1dd4e22..c5c9c319fe 100644 --- a/vendor/knative.dev/pkg/reconciler/configstore.go +++ b/vendor/knative.dev/pkg/reconciler/configstore.go @@ -1,7 +1,7 @@ /* Copyright 2020 The Knative Authors -Licensed under the Apache License, Veroute.on 2.0 (the "License"); +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
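The new `ptr.XxxValue` helpers are the read-side counterparts of the package's pointer constructors: they dereference safely and fall back to the zero value. A small usage sketch follows; `ptr.Bool` is assumed to exist in the same package (it is not part of this diff).

```go
package main

import (
	"fmt"
	"time"

	"knative.dev/pkg/ptr"
)

func main() {
	var timeout *time.Duration // an optional field that was never set

	// Nil pointers collapse to the zero value, so no nil checks are needed
	// when copying optional API fields into value-typed structs.
	fmt.Println(ptr.DurationValue(timeout)) // 0s
	fmt.Println(ptr.StringValue(nil))       // ""

	// ptr.Bool is the pre-existing constructor counterpart (assumed, not in this diff).
	fmt.Println(ptr.BoolValue(ptr.Bool(true))) // true
}
```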
You may obtain a copy of the License at diff --git a/vendor/knative.dev/pkg/reconciler/deletion.go b/vendor/knative.dev/pkg/reconciler/deletion.go new file mode 100644 index 0000000000..08711c2521 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/deletion.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" +) + +// OnDeletionInterface defines the strongly typed interface to be implemented by a +// controller observing a deletion of an object. Every controller that was active +// during the deletion of the respective resource is guaranteed to observe this event, +// leader or not. It's usually used to clear up in-memory state regarding the respective +// resource. Finalizers should be used to ensure external resources are properly cleaned +// up. +type OnDeletionInterface interface { + // ObserveDeletion implements custom logic to observe deletion of the respective resource + // with the given key. + ObserveDeletion(ctx context.Context, key types.NamespacedName) error +} diff --git a/vendor/knative.dev/pkg/reconciler/events.go b/vendor/knative.dev/pkg/reconciler/events.go index 611ba2dc14..df4c366cca 100644 --- a/vendor/knative.dev/pkg/reconciler/events.go +++ b/vendor/knative.dev/pkg/reconciler/events.go @@ -62,7 +62,7 @@ func NewEvent(eventtype, reason, messageFmt string, args ...interface{}) Event { // ReconcilerEvent wraps the fields required for recorders to create a // kubernetes recorder Event. -type ReconcilerEvent struct { //nolint:golint // for backcompat. +type ReconcilerEvent struct { //nolint:revive // for backcompat. EventType string Reason string Format string @@ -87,8 +87,14 @@ func (e *ReconcilerEvent) Is(target error) bool { return errors.Is(err, target) } +// As allows ReconcilerEvents to be treated as regular error types. +func (e *ReconcilerEvent) As(target interface{}) bool { + err := fmt.Errorf(e.Format, e.Args...) + return errors.As(err, target) +} + // Error returns the string that is formed by using the format string with the // provided args. func (e *ReconcilerEvent) Error() string { - return fmt.Sprintf(e.Format, e.Args...) + return fmt.Errorf(e.Format, e.Args...).Error() } diff --git a/vendor/knative.dev/pkg/reconciler/reconcile_common.go b/vendor/knative.dev/pkg/reconciler/reconcile_common.go index f7408fb477..114d53b036 100644 --- a/vendor/knative.dev/pkg/reconciler/reconcile_common.go +++ b/vendor/knative.dev/pkg/reconciler/reconcile_common.go @@ -31,9 +31,13 @@ import ( const failedGenerationBump = "NewObservedGenFailure" const ( - DoReconcileKind = "ReconcileKind" - DoFinalizeKind = "FinalizeKind" - DoObserveKind = "ObserveKind" + // DoReconcileKind is the function name for reconciling the resource (as a leader). + DoReconcileKind = "ReconcileKind" + // DoFinalizeKind is the function name for finalizing the resource (as a leader). 
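`OnDeletionInterface` gives every controller that was active during a deletion, leader or not, a hook to drop in-memory state for the deleted object. A minimal sketch of a reconciler satisfying it; the cache field and its contents are illustrative, and, as the interface comment says, cleanup of external resources should still go through finalizers.

```go
package example

import (
	"context"
	"sync"

	"k8s.io/apimachinery/pkg/types"
	"knative.dev/pkg/reconciler"
)

// Reconciler tracks per-resource, in-memory state that must be cleared when
// the resource is deleted, even on non-leading replicas.
type Reconciler struct {
	mu    sync.Mutex
	cache map[types.NamespacedName]string // illustrative in-memory state
}

// Compile-time check that the sketch satisfies the new interface.
var _ reconciler.OnDeletionInterface = (*Reconciler)(nil)

// ObserveDeletion drops the cached entry for the deleted resource.
func (r *Reconciler) ObserveDeletion(ctx context.Context, key types.NamespacedName) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.cache, key)
	return nil
}
```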
+ DoFinalizeKind = "FinalizeKind" + // DoObserveKind is the function name for observing the resource (as a non leader). + DoObserveKind = "ObserveKind" + // DoObserveFinalizeKind is the function name for observing finalization of the resource (as a non leader). DoObserveFinalizeKind = "ObserveFinalizeKind" ) diff --git a/vendor/knative.dev/pkg/reconciler/testing/context.go b/vendor/knative.dev/pkg/reconciler/testing/context.go index e14aa2ade0..150c59bf20 100644 --- a/vendor/knative.dev/pkg/reconciler/testing/context.go +++ b/vendor/knative.dev/pkg/reconciler/testing/context.go @@ -39,16 +39,21 @@ import ( ) // SetupFakeContext sets up the the Context and the fake informers for the tests. -func SetupFakeContext(t testing.TB) (context.Context, []controller.Informer) { - c, _, is := SetupFakeContextWithCancel(t) +// The optional fs() can be used to edit ctx before the SetupInformer steps +func SetupFakeContext(t testing.TB, fs ...func(context.Context) context.Context) (context.Context, []controller.Informer) { + c, _, is := SetupFakeContextWithCancel(t, fs...) return c, is } // SetupFakeContextWithCancel sets up the the Context and the fake informers for the tests // The provided context can be canceled using provided callback. -func SetupFakeContextWithCancel(t testing.TB) (context.Context, context.CancelFunc, []controller.Informer) { +// The optional fs() can be used to edit ctx before the SetupInformer steps +func SetupFakeContextWithCancel(t testing.TB, fs ...func(context.Context) context.Context) (context.Context, context.CancelFunc, []controller.Informer) { ctx, c := context.WithCancel(logtesting.TestContextWithLogger(t)) ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000)) + for _, f := range fs { + ctx = f(ctx) + } ctx, is := injection.Fake.SetupInformers(ctx, &rest.Config{}) return ctx, c, is } diff --git a/vendor/knative.dev/pkg/system/env.go b/vendor/knative.dev/pkg/system/env.go index d9ec3ddac3..ea1db0f0be 100644 --- a/vendor/knative.dev/pkg/system/env.go +++ b/vendor/knative.dev/pkg/system/env.go @@ -22,7 +22,10 @@ import ( ) const ( - NamespaceEnvKey = "SYSTEM_NAMESPACE" + // NamespaceEnvKey is the environment variable that specifies the system namespace. + NamespaceEnvKey = "SYSTEM_NAMESPACE" + // ResourceLabelEnvKey is the environment variable that specifies the system resource + // label. ResourceLabelEnvKey = "SYSTEM_RESOURCE_LABEL" ) diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS index c50adc8493..65aa9e7b11 100644 --- a/vendor/knative.dev/pkg/test/OWNERS +++ b/vendor/knative.dev/pkg/test/OWNERS @@ -1,7 +1,7 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- productivity-approvers +- productivity-writers reviewers: - productivity-reviewers diff --git a/vendor/knative.dev/pkg/test/README.md b/vendor/knative.dev/pkg/test/README.md index c56070878b..d54c9ae275 100644 --- a/vendor/knative.dev/pkg/test/README.md +++ b/vendor/knative.dev/pkg/test/README.md @@ -54,7 +54,7 @@ and tests can pass in `t.Logf` like this: ```go _, err = pkgTest.WaitForEndpointState( - clients.KubeClient, + kubeClient, t.Logf, ...), ``` diff --git a/vendor/knative.dev/pkg/test/cleanup.go b/vendor/knative.dev/pkg/test/cleanup.go index b349172629..7fc91bc5e6 100644 --- a/vendor/knative.dev/pkg/test/cleanup.go +++ b/vendor/knative.dev/pkg/test/cleanup.go @@ -14,27 +14,52 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// cleanup allows you to define a cleanup function that will be executed -// if your test is interrupted. - package test import ( "os" "os/signal" - - "knative.dev/pkg/test/logging" + "sync" ) -// CleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught -func CleanupOnInterrupt(cleanup func(), logf logging.FormatLogger) { +type logFunc func(template string, args ...interface{}) + +var cleanup struct { + once sync.Once + mutex sync.RWMutex + funcs []func() +} + +func waitForInterrupt() { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) + go func() { - for range c { - logf("Test interrupted, cleaning up.") - cleanup() - os.Exit(1) + <-c + + cleanup.mutex.RLock() + defer cleanup.mutex.RUnlock() + + for i := len(cleanup.funcs) - 1; i >= 0; i-- { + cleanup.funcs[i]() } + + os.Exit(1) }() } + +// CleanupOnInterrupt will execute the function if an interrupt signal is caught +// Deprecated - use OnInterrupt +func CleanupOnInterrupt(f func(), log logFunc) { + OnInterrupt(f) +} + +// OnInterrupt registers a cleanup function to run if an interrupt signal is caught +func OnInterrupt(cleanupFunc func()) { + cleanup.once.Do(waitForInterrupt) + + cleanup.mutex.Lock() + defer cleanup.mutex.Unlock() + + cleanup.funcs = append(cleanup.funcs, cleanupFunc) +} diff --git a/vendor/knative.dev/pkg/test/clients.go b/vendor/knative.dev/pkg/test/clients.go index e35a390457..88530c36b7 100644 --- a/vendor/knative.dev/pkg/test/clients.go +++ b/vendor/knative.dev/pkg/test/clients.go @@ -23,65 +23,39 @@ import ( "fmt" "strings" - "knative.dev/pkg/test/flags" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - k8styped "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "knative.dev/pkg/test/logging" "knative.dev/pkg/test/spoof" ) -// KubeClient holds instances of interfaces for making requests to kubernetes client. -// Deprecated: use a kubeclient and the helper methods in test. -type KubeClient struct { - kubernetes.Interface -} - // NewSpoofingClient returns a spoofing client to make requests func NewSpoofingClient(ctx context.Context, client kubernetes.Interface, logf logging.FormatLogger, domain string, resolvable bool, opts ...spoof.TransportOption) (*spoof.SpoofingClient, error) { - return spoof.New(ctx, client, logf, domain, resolvable, flags.Flags().IngressEndpoint, - flags.Flags().SpoofRequestInterval, flags.Flags().SpoofRequestTimeout, opts...) -} - -// NewKubeClient instantiates and returns several clientsets required for making request to the -// kube client specified by the combination of clusterName and configPath. Clients can make requests within namespace. -// Deprecated: use a kubeclient and the helper methods in test. -func NewKubeClient(configPath string, clusterName string) (*KubeClient, error) { - cfg, err := BuildClientConfig(configPath, clusterName) - if err != nil { - return nil, err - } - - k, err := kubernetes.NewForConfig(cfg) - if err != nil { - return nil, err - } - return &KubeClient{Interface: k}, nil + return spoof.New(ctx, client, logf, domain, resolvable, Flags.IngressEndpoint, + Flags.SpoofRequestInterval, Flags.SpoofRequestTimeout, opts...) 
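The rewritten cleanup.go installs a single interrupt handler and runs registered functions in reverse registration order; `CleanupOnInterrupt` is now a thin wrapper over `OnInterrupt`. A sketch of registering cleanup for an externally created resource; the namespace name and the deletion call are illustrative.

```go
package example_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"knative.dev/pkg/test"
)

func registerNamespaceCleanup(t *testing.T, client kubernetes.Interface, ns string) {
	// Registered functions run last-in-first-out if the test binary receives
	// an interrupt; normal end-of-test cleanup should still use t.Cleanup.
	test.OnInterrupt(func() {
		t.Logf("interrupted, deleting namespace %s", ns)
		_ = client.CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{})
	})
}
```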
} // BuildClientConfig builds the client config specified by the config path and the cluster name func BuildClientConfig(kubeConfigPath string, clusterName string) (*rest.Config, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() overrides := clientcmd.ConfigOverrides{} + + if kubeConfigPath != "" { + loadingRules.ExplicitPath = kubeConfigPath + } // Override the cluster name if provided. if clusterName != "" { overrides.Context.Cluster = clusterName } return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath}, + loadingRules, &overrides).ClientConfig() } -// UpdateConfigMap updates the config map for specified @name with values -// Deprecated: use UpdateConfigMap -func (client *KubeClient) UpdateConfigMap(ctx context.Context, name string, configName string, values map[string]string) error { - return UpdateConfigMap(ctx, client, name, configName, values) -} - // UpdateConfigMap updates the config map for specified @name with values func UpdateConfigMap(ctx context.Context, client kubernetes.Interface, name string, configName string, values map[string]string) error { configMap, err := client.CoreV1().ConfigMaps(name).Get(ctx, configName, metav1.GetOptions{}) @@ -97,30 +71,12 @@ func UpdateConfigMap(ctx context.Context, client kubernetes.Interface, name stri return err } -// GetConfigMap gets the knative serving config map. -// Deprecated: use kubeclient.CoreV1().ConfigMaps(name) -func (client *KubeClient) GetConfigMap(name string) k8styped.ConfigMapInterface { - return client.CoreV1().ConfigMaps(name) -} - -// CreatePod will create a Pod -// Deprecated: use CreatePod -func (client *KubeClient) CreatePod(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, error) { - return CreatePod(ctx, client, pod) -} - // CreatePod will create a Pod func CreatePod(ctx context.Context, client kubernetes.Interface, pod *corev1.Pod) (*corev1.Pod, error) { pods := client.CoreV1().Pods(pod.GetNamespace()) return pods.Create(ctx, pod, metav1.CreateOptions{}) } -// PodLogs returns Pod logs for given Pod and Container in the namespace -// Deprecated: use PodLogs -func (client *KubeClient) PodLogs(ctx context.Context, podName, containerName, namespace string) ([]byte, error) { - return PodLogs(ctx, client, podName, containerName, namespace) -} - // PodLogs returns Pod logs for given Pod and Container in the namespace func PodLogs(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace string) ([]byte, error) { pods := client.CoreV1().Pods(namespace) diff --git a/vendor/knative.dev/pkg/test/e2e_flags.go b/vendor/knative.dev/pkg/test/e2e_flags.go index 55a59582b0..ef9b5519e7 100644 --- a/vendor/knative.dev/pkg/test/e2e_flags.go +++ b/vendor/knative.dev/pkg/test/e2e_flags.go @@ -22,12 +22,10 @@ package test import ( "bytes" "flag" - "os/user" - "path/filepath" "text/template" - "knative.dev/pkg/injection" - testflags "knative.dev/pkg/test/flags" + env "knative.dev/pkg/environment" + testenv "knative.dev/pkg/test/environment" "knative.dev/pkg/test/logging" ) @@ -41,30 +39,20 @@ var ( // EnvironmentFlags define the flags that are needed to run the e2e tests. 
// Deprecated: use test/flags.Flags() or injection.Flags() type EnvironmentFlags struct { - *injection.Environment - *testflags.TestEnvironment + env.ClientConfig + testenv.TestClientConfig } func initializeFlags() *EnvironmentFlags { f := new(EnvironmentFlags) - testflags.InitFlags(flag.CommandLine) - - f.TestEnvironment = testflags.Flags() - f.Environment = injection.Flags() - - // We want to do this defaulting for tests only. The flags are reused between tests - // and production code and we want to make sure that production code defaults to - // the in-cluster config correctly. - if f.Environment.Kubeconfig == "" { - if usr, err := user.Current(); err == nil { - f.Environment.Kubeconfig = filepath.Join(usr.HomeDir, ".kube", "config") - } - } + f.ClientConfig.InitFlags(flag.CommandLine) + f.TestClientConfig.InitFlags(flag.CommandLine) return f } +// SetupLoggingFlags initializes a logger for tests. // TODO(coryrc): Remove once other repos are moved to call logging.InitializeLogger() directly func SetupLoggingFlags() { logging.InitializeLogger() @@ -72,7 +60,7 @@ func SetupLoggingFlags() { // ImagePath is a helper function to transform an image name into an image reference that can be pulled. func ImagePath(name string) string { - tpl, err := template.New("image").Parse(testflags.Flags().ImageTemplate) + tpl, err := template.New("image").Parse(Flags.ImageTemplate) if err != nil { panic("could not parse image template: " + err.Error()) } @@ -83,9 +71,9 @@ func ImagePath(name string) string { Name string Tag string }{ - Repository: testflags.Flags().DockerRepo, + Repository: Flags.DockerRepo, Name: name, - Tag: testflags.Flags().Tag, + Tag: Flags.Tag, }); err != nil { panic("could not apply the image template: " + err.Error()) } diff --git a/vendor/knative.dev/pkg/test/flags/e2e_flags.go b/vendor/knative.dev/pkg/test/environment/config.go similarity index 59% rename from vendor/knative.dev/pkg/test/flags/e2e_flags.go rename to vendor/knative.dev/pkg/test/environment/config.go index fc9032a78a..6591a46375 100644 --- a/vendor/knative.dev/pkg/test/flags/e2e_flags.go +++ b/vendor/knative.dev/pkg/test/environment/config.go @@ -17,7 +17,7 @@ limitations under the License. // This file contains logic to encapsulate flags which are needed to specify // what cluster, etc. to use for e2e tests. -package flags +package environment import ( "flag" @@ -25,9 +25,8 @@ import ( "time" ) -// TestEnvironment define the config that are needed to run the e2e tests. -type TestEnvironment struct { - Cluster string // K8s cluster (defaults to cluster in kubeconfig) +// TestClientConfig defines propertis about the test environment +type TestClientConfig struct { Namespace string // K8s namespace (blank by default, to be overwritten by test suite) IngressEndpoint string // Host to use for ingress endpoint ImageTemplate string // Template to build the image reference (defaults to {{.Repository}}/{{.Name}}:{{.Tag}}) @@ -37,43 +36,25 @@ type TestEnvironment struct { SpoofRequestTimeout time.Duration // SpoofRequestTimeout is the timeout for polling requests in SpoofingClient } -var f *TestEnvironment - // InitFlags is for explicitly initializing the flags. -func InitFlags(flagset *flag.FlagSet) { - if flagset == nil { - flagset = flag.CommandLine - } - - f = new(TestEnvironment) - - flagset.StringVar(&f.Cluster, "cluster", "", - "Provide the cluster to test against. 
Defaults to the current cluster in kubeconfig.") - - flagset.StringVar(&f.Namespace, "namespace", "", +func (c *TestClientConfig) InitFlags(fs *flag.FlagSet) { + fs.StringVar(&c.Namespace, "namespace", "", "Provide the namespace you would like to use for these tests.") - flagset.StringVar(&f.IngressEndpoint, "ingressendpoint", "", "Provide a static endpoint url to the ingress server used during tests.") + fs.StringVar(&c.IngressEndpoint, "ingressendpoint", "", "Provide a static endpoint url to the ingress server used during tests.") - flagset.StringVar(&f.ImageTemplate, "imagetemplate", "{{.Repository}}/{{.Name}}:{{.Tag}}", + fs.StringVar(&c.ImageTemplate, "imagetemplate", "{{.Repository}}/{{.Name}}:{{.Tag}}", "Provide a template to generate the reference to an image from the test. Defaults to `{{.Repository}}/{{.Name}}:{{.Tag}}`.") - flagset.DurationVar(&f.SpoofRequestInterval, "spoofinterval", 1*time.Second, + fs.DurationVar(&c.SpoofRequestInterval, "spoofinterval", 1*time.Second, "Provide an interval between requests for the SpoofingClient") - flagset.DurationVar(&f.SpoofRequestTimeout, "spooftimeout", 5*time.Minute, + fs.DurationVar(&c.SpoofRequestTimeout, "spooftimeout", 5*time.Minute, "Provide a request timeout for the SpoofingClient") defaultRepo := os.Getenv("KO_DOCKER_REPO") - flagset.StringVar(&f.DockerRepo, "dockerrepo", defaultRepo, + fs.StringVar(&c.DockerRepo, "dockerrepo", defaultRepo, "Provide the uri of the docker repo you have uploaded the test image to using `uploadtestimage.sh`. Defaults to $KO_DOCKER_REPO") - flagset.StringVar(&f.Tag, "tag", "latest", "Provide the version tag for the test images.") -} - -// Flags returns the command line flags or defaults for settings in the user's -// environment. See TestEnvironment for a list of supported fields. -// Caller must call InitFlags() and flags.Parse() before calling Flags(). 
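With the package-level `flags.Flags()` accessor replaced by `environment.TestClientConfig` (and `environment.ClientConfig` on the injection side), callers own the struct and register its flags explicitly. A sketch of the typical TestMain wiring; registering on the default FlagSet is an assumption about how a consuming test suite would use it.

```go
package example_test

import (
	"flag"
	"os"
	"testing"

	testenv "knative.dev/pkg/test/environment"
)

var testConfig testenv.TestClientConfig

func TestMain(m *testing.M) {
	// Register the e2e flags (namespace, ingressendpoint, imagetemplate,
	// spoofinterval, spooftimeout, dockerrepo, tag) and parse them before
	// any test reads testConfig.
	testConfig.InitFlags(flag.CommandLine)
	flag.Parse()
	os.Exit(m.Run())
}
```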
-func Flags() *TestEnvironment { - return f + fs.StringVar(&c.Tag, "tag", "latest", "Provide the version tag for the test images.") } diff --git a/vendor/knative.dev/pkg/test/kube_checks.go b/vendor/knative.dev/pkg/test/kube_checks.go index d6bcd648ad..30698e6941 100644 --- a/vendor/knative.dev/pkg/test/kube_checks.go +++ b/vendor/knative.dev/pkg/test/kube_checks.go @@ -25,14 +25,14 @@ import ( "strings" "time" - "k8s.io/client-go/kubernetes" - + "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" k8styped "k8s.io/client-go/kubernetes/typed/core/v1" "knative.dev/pkg/test/logging" ) @@ -51,14 +51,20 @@ func WaitForDeploymentState(ctx context.Context, client kubernetes.Interface, na d := client.AppsV1().Deployments(namespace) span := logging.GetEmitableSpan(ctx, fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc)) defer span.End() - - return wait.PollImmediate(interval, timeout, func() (bool, error) { - d, err := d.Get(ctx, name, metav1.GetOptions{}) + var lastState *appsv1.Deployment + waitErr := wait.PollImmediate(interval, timeout, func() (bool, error) { + var err error + lastState, err = d.Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } - return inState(d) + return inState(lastState) }) + + if waitErr != nil { + return fmt.Errorf("deployment %q is not in desired state, got: %s: %w", name, spew.Sprint(lastState), waitErr) + } + return nil } // WaitForPodListState polls the status of the PodList @@ -70,13 +76,20 @@ func WaitForPodListState(ctx context.Context, client kubernetes.Interface, inSta span := logging.GetEmitableSpan(ctx, "WaitForPodListState/"+desc) defer span.End() - return wait.PollImmediate(interval, podTimeout, func() (bool, error) { - p, err := p.List(ctx, metav1.ListOptions{}) + var lastState *corev1.PodList + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + lastState, err = p.List(ctx, metav1.ListOptions{}) if err != nil { return true, err } - return inState(p) + return inState(lastState) }) + + if waitErr != nil { + return fmt.Errorf("pod list is not in desired state, got: %s: %w", spew.Sprint(lastState), waitErr) + } + return nil } // WaitForPodState polls the status of the specified Pod @@ -88,13 +101,20 @@ func WaitForPodState(ctx context.Context, client kubernetes.Interface, inState f span := logging.GetEmitableSpan(ctx, "WaitForPodState/"+name) defer span.End() - return wait.PollImmediate(interval, podTimeout, func() (bool, error) { - p, err := p.Get(ctx, name, metav1.GetOptions{}) + var lastState *corev1.Pod + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + lastState, err = p.Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } - return inState(p) + return inState(lastState) }) + + if waitErr != nil { + return fmt.Errorf("pod %q is not in desired state, got: %s: %w", name, spew.Sprint(lastState), waitErr) + } + return nil } // WaitForPodDeleted waits for the given pod to disappear from the given namespace. 
@@ -110,21 +130,27 @@ func WaitForPodDeleted(ctx context.Context, client kubernetes.Interface, name, n return nil } -// WaitForServiceHasAtLeastOneEndpoint polls the status of the specified Service +// WaitForServiceEndpoints polls the status of the specified Service // from client every interval until number of service endpoints = numOfEndpoints func WaitForServiceEndpoints(ctx context.Context, client kubernetes.Interface, svcName string, svcNamespace string, numOfEndpoints int) error { endpointsService := client.CoreV1().Endpoints(svcNamespace) span := logging.GetEmitableSpan(ctx, "WaitForServiceHasAtLeastOneEndpoint/"+svcName) defer span.End() - return wait.PollImmediate(interval, podTimeout, func() (bool, error) { - endpoint, err := endpointsService.Get(ctx, svcName, metav1.GetOptions{}) + var endpoints *corev1.Endpoints + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + endpoints, err = endpointsService.Get(ctx, svcName, metav1.GetOptions{}) if err != nil { return false, err } - return countEndpointsNum(endpoint) == numOfEndpoints, nil + return countEndpointsNum(endpoints) == numOfEndpoints, nil }) + if waitErr != nil { + return fmt.Errorf("did not reach the desired number of endpoints, got: %d: %w", countEndpointsNum(endpoints), waitErr) + } + return nil } func countEndpointsNum(e *corev1.Endpoints) int { @@ -155,10 +181,16 @@ func GetEndpointAddresses(ctx context.Context, client kubernetes.Interface, svcN // WaitForChangedEndpoints waits until the endpoints for the given service differ from origEndpoints. func WaitForChangedEndpoints(ctx context.Context, client kubernetes.Interface, svcName, svcNamespace string, origEndpoints []string) error { - return wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) { - newEndpoints, err := GetEndpointAddresses(ctx, client, svcName, svcNamespace) + var newEndpoints []string + waitErr := wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) { + var err error + newEndpoints, err = GetEndpointAddresses(ctx, client, svcName, svcNamespace) return !cmp.Equal(origEndpoints, newEndpoints), err }) + if waitErr != nil { + return fmt.Errorf("new endpoints are not different from the original ones, got %q: %w", newEndpoints, waitErr) + } + return nil } // GetConfigMap gets the configmaps for a given namespace @@ -175,14 +207,20 @@ func DeploymentScaledToZeroFunc() func(d *appsv1.Deployment) (bool, error) { // WaitForLogContent waits until logs for given Pod/Container include the given content. // If the content is not present within timeout it returns error. 
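WaitForServiceEndpoints and WaitForChangedEndpoints above now report the last observed endpoint count or address list when the poll times out. A short usage sketch from a test's point of view; the service name, namespace and expected count are illustrative, and the client is whatever kubernetes.Interface the test already holds:

package example

import (
	"context"
	"testing"

	"k8s.io/client-go/kubernetes"
	pkgtest "knative.dev/pkg/test"
)

// requireTwoEndpoints is a hypothetical helper around the vendored
// WaitForServiceEndpoints: it fails the test unless the Service's Endpoints
// object eventually lists exactly two addresses.
func requireTwoEndpoints(ctx context.Context, t *testing.T, client kubernetes.Interface) {
	t.Helper()
	if err := pkgtest.WaitForServiceEndpoints(ctx, client, "tekton-pipelines-webhook", "tekton-pipelines", 2); err != nil {
		t.Fatalf("webhook service never reached 2 endpoints: %v", err)
	}
}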
-func WaitForLogContent(ctx context.Context, client *KubeClient, podName, containerName, namespace, content string) error { - return wait.PollImmediate(interval, logTimeout, func() (bool, error) { - logs, err := client.PodLogs(ctx, podName, containerName, namespace) +func WaitForLogContent(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace, content string) error { + var logs []byte + waitErr := wait.PollImmediate(interval, logTimeout, func() (bool, error) { + var err error + logs, err = PodLogs(ctx, client, podName, containerName, namespace) if err != nil { return true, err } return strings.Contains(string(logs), content), nil }) + if waitErr != nil { + return fmt.Errorf("logs do not contain the desired content %q, got %q: %w", content, logs, waitErr) + } + return nil } // WaitForAllPodsRunning waits for all the pods to be in running state @@ -192,14 +230,20 @@ func WaitForAllPodsRunning(ctx context.Context, client kubernetes.Interface, nam // WaitForPodRunning waits for the given pod to be in running state func WaitForPodRunning(ctx context.Context, client kubernetes.Interface, name string, namespace string) error { - p := client.CoreV1().Pods(namespace) - return wait.PollImmediate(interval, podTimeout, func() (bool, error) { - p, err := p.Get(ctx, name, metav1.GetOptions{}) + var p *corev1.Pod + pods := client.CoreV1().Pods(namespace) + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + p, err = pods.Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } return podRunning(p), nil }) + if waitErr != nil { + return fmt.Errorf("pod %q did not reach the running state, got %+v: %w", name, p.Status.Phase, waitErr) + } + return nil } // podsRunning will check the status conditions of the pod list and return true all pods are Running diff --git a/vendor/knative.dev/pkg/test/request.go b/vendor/knative.dev/pkg/test/request.go index ecaecd68f3..821696f5d0 100644 --- a/vendor/knative.dev/pkg/test/request.go +++ b/vendor/knative.dev/pkg/test/request.go @@ -24,8 +24,6 @@ import ( "strings" "time" - "knative.dev/pkg/test/flags" - "k8s.io/client-go/kubernetes" "knative.dev/pkg/test/logging" "knative.dev/pkg/test/spoof" @@ -102,7 +100,7 @@ func WaitForEndpointState( resolvable bool, opts ...interface{}) (*spoof.Response, error) { return WaitForEndpointStateWithTimeout(ctx, kubeClient, logf, url, inState, - desc, resolvable, flags.Flags().SpoofRequestTimeout, opts...) + desc, resolvable, Flags.SpoofRequestTimeout, opts...) } // WaitForEndpointStateWithTimeout will poll an endpoint until inState indicates the state is achieved @@ -122,6 +120,22 @@ func WaitForEndpointStateWithTimeout( timeout time.Duration, opts ...interface{}) (*spoof.Response, error) { + client, rOpts, err := makeSpoofClient(ctx, kubeClient, logf, url, resolvable, timeout /* true, */, opts...) + if err != nil { + return nil, err + } + return client.WaitForEndpointState(ctx, url, inState, desc, rOpts...) +} + +func makeSpoofClient( + ctx context.Context, + kubeClient kubernetes.Interface, + logf logging.FormatLogger, + url *url.URL, + resolvable bool, + timeout time.Duration, + opts ...interface{}) (*spoof.SpoofingClient, []spoof.RequestOption, error) { + var tOpts []spoof.TransportOption var rOpts []spoof.RequestOption @@ -136,9 +150,26 @@ func WaitForEndpointStateWithTimeout( client, err := NewSpoofingClient(ctx, kubeClient, logf, url.Hostname(), resolvable, tOpts...) 
if err != nil { - return nil, err + return nil, nil, err } client.RequestTimeout = timeout - return client.WaitForEndpointState(ctx, url, inState, desc, rOpts...) + return client, rOpts, nil +} + +func CheckEndpointState( + ctx context.Context, + kubeClient kubernetes.Interface, + logf logging.FormatLogger, + url *url.URL, + inState spoof.ResponseChecker, + desc string, + resolvable bool, + opts ...interface{}, +) (*spoof.Response, error) { + client, rOpts, err := makeSpoofClient(ctx, kubeClient, logf, url, resolvable, Flags.SpoofRequestTimeout /* false, */, opts...) + if err != nil { + return nil, err + } + return client.CheckEndpointState(ctx, url, inState, desc, rOpts...) } diff --git a/vendor/knative.dev/pkg/test/spoof/error_checks.go b/vendor/knative.dev/pkg/test/spoof/error_checks.go index e152e11d79..378bf58240 100644 --- a/vendor/knative.dev/pkg/test/spoof/error_checks.go +++ b/vendor/knative.dev/pkg/test/spoof/error_checks.go @@ -58,3 +58,7 @@ func isConnectionRefused(err error) bool { func isConnectionReset(err error) bool { return err != nil && strings.Contains(err.Error(), "connection reset by peer") } + +func isNoRouteToHostError(err error) bool { + return err != nil && strings.Contains(err.Error(), "connect: no route to host") +} diff --git a/vendor/knative.dev/pkg/test/spoof/spoof.go b/vendor/knative.dev/pkg/test/spoof/spoof.go index 48ffcffa85..c50108c565 100644 --- a/vendor/knative.dev/pkg/test/spoof/spoof.go +++ b/vendor/knative.dev/pkg/test/spoof/spoof.go @@ -208,7 +208,7 @@ func DefaultErrorRetryChecker(err error) (bool, error) { if isTCPTimeout(err) { return true, fmt.Errorf("retrying for TCP timeout: %w", err) } - // Retrying on DNS error, since we may be using xip.io or nip.io in tests. + // Retrying on DNS error, since we may be using sslip.io or nip.io in tests. if isDNSError(err) { return true, fmt.Errorf("retrying for DNS error: %w", err) } @@ -223,6 +223,11 @@ func DefaultErrorRetryChecker(err error) (bool, error) { if errors.Is(err, io.EOF) { return true, fmt.Errorf("retrying for: %w", err) } + // No route to host errors are in the same category as connection refused errors and + // are usually transient. + if isNoRouteToHostError(err) { + return true, fmt.Errorf("retrying for 'no route to host' error: %w", err) + } return false, err } @@ -254,7 +259,25 @@ func (sc *SpoofingClient) WaitForEndpointState( desc string, opts ...RequestOption) (*Response, error) { - defer logging.GetEmitableSpan(ctx, "WaitForEndpointState/"+desc).End() + return sc.endpointState( + ctx, + url, + inState, + desc, + func(req *http.Request, check ResponseChecker) (*Response, error) { return sc.Poll(req, check) }, + "WaitForEndpointState", + opts...) 
+} + +func (sc *SpoofingClient) endpointState( + ctx context.Context, + url *url.URL, + inState ResponseChecker, + desc string, + f func(*http.Request, ResponseChecker) (*Response, error), + logName string, + opts ...RequestOption) (*Response, error) { + defer logging.GetEmitableSpan(ctx, logName+"/"+desc).End() if url.Scheme == "" || url.Host == "" { return nil, fmt.Errorf("invalid URL: %q", url.String()) @@ -269,5 +292,57 @@ func (sc *SpoofingClient) WaitForEndpointState( opt(req) } - return sc.Poll(req, inState) + return f(req, inState) +} + +func (sc *SpoofingClient) Check(req *http.Request, inState ResponseChecker) (*Response, error) { + traceContext, span := trace.StartSpan(req.Context(), "SpoofingClient-Trace") + defer span.End() + rawResp, err := sc.Client.Do(req.WithContext(traceContext)) + if err != nil { + sc.Logf("NOT Retrying %s: %v", req.URL.String(), err) + return nil, err + } + defer rawResp.Body.Close() + + body, err := ioutil.ReadAll(rawResp.Body) + if err != nil { + return nil, err + } + rawResp.Header.Add(zipkin.ZipkinTraceIDHeader, span.SpanContext().TraceID.String()) + + resp := &Response{ + Status: rawResp.Status, + StatusCode: rawResp.StatusCode, + Header: rawResp.Header, + Body: body, + } + ok, err := inState(resp) + + sc.logZipkinTrace(resp) + + if err != nil { + return resp, fmt.Errorf("response: %s did not pass checks: %w", resp, err) + } + if ok { + return resp, nil + } + + return nil, err +} + +func (sc *SpoofingClient) CheckEndpointState( + ctx context.Context, + url *url.URL, + inState ResponseChecker, + desc string, + opts ...RequestOption) (*Response, error) { + return sc.endpointState( + ctx, + url, + inState, + desc, + sc.Check, + "CheckEndpointState", + opts...) } diff --git a/vendor/knative.dev/pkg/test/tinterface.go b/vendor/knative.dev/pkg/test/tinterface.go index 4cda2c90ac..530ff0a2ee 100644 --- a/vendor/knative.dev/pkg/test/tinterface.go +++ b/vendor/knative.dev/pkg/test/tinterface.go @@ -20,6 +20,7 @@ limitations under the License. package test +// T is an interface mimicking *testing.T. // Deprecated: Do not use this. Define your own interface. type T interface { Name() string @@ -30,6 +31,7 @@ type T interface { Error(args ...interface{}) } +// TLegacy is an interface mimicking *testing.T. // Deprecated: Do not use this. Define your own interface. type TLegacy interface { T diff --git a/vendor/knative.dev/pkg/tracing/config/OWNERS b/vendor/knative.dev/pkg/tracing/config/OWNERS index 2480fc6d43..c3ff67e25b 100644 --- a/vendor/knative.dev/pkg/tracing/config/OWNERS +++ b/vendor/knative.dev/pkg/tracing/config/OWNERS @@ -1,4 +1,6 @@ # The OWNERS file is used by prow to automatically merge approved PRs. 
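The spoof changes above factor the shared setup into an endpointState helper: WaitForEndpointState keeps polling through sc.Poll (retrying DNS, TCP timeout, connection refused/reset and the newly added "no route to host" errors), while the new CheckEndpointState issues a single request through sc.Check and returns the checker's verdict immediately. A hedged usage sketch of the one-shot variant; the URL and status check are illustrative:

package example

import (
	"context"
	"net/http"
	"net/url"

	"knative.dev/pkg/test/spoof"
)

// checkOnce probes an endpoint exactly once via CheckEndpointState; unlike the
// polling WaitForEndpointState, a rejected response is surfaced immediately
// rather than retried until the request timeout.
func checkOnce(ctx context.Context, sc *spoof.SpoofingClient, target *url.URL) (*spoof.Response, error) {
	isOK := func(resp *spoof.Response) (bool, error) {
		return resp.StatusCode == http.StatusOK, nil
	}
	return sc.CheckEndpointState(ctx, target, isOK, "probe-once")
}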
approvers: -- configmap-approvers +- pkg-configmap-writers +reviewers: +- pkg-configmap-reviewers diff --git a/vendor/knative.dev/pkg/tracing/config/tracing.go b/vendor/knative.dev/pkg/tracing/config/tracing.go index db60cf00be..4d1831fc1b 100644 --- a/vendor/knative.dev/pkg/tracing/config/tracing.go +++ b/vendor/knative.dev/pkg/tracing/config/tracing.go @@ -23,7 +23,6 @@ import ( "reflect" "strconv" - "cloud.google.com/go/compute/metadata" corev1 "k8s.io/api/core/v1" cm "knative.dev/pkg/configmap" ) @@ -32,12 +31,11 @@ const ( // ConfigName is the name of the configmap ConfigName = "config-tracing" - enableKey = "enable" - backendKey = "backend" - zipkinEndpointKey = "zipkin-endpoint" - debugKey = "debug" - sampleRateKey = "sample-rate" - stackdriverProjectIDKey = "stackdriver-project-id" + enableKey = "enable" + backendKey = "backend" + zipkinEndpointKey = "zipkin-endpoint" + debugKey = "debug" + sampleRateKey = "sample-rate" ) // BackendType specifies the backend to use for tracing @@ -46,17 +44,15 @@ type BackendType string const ( // None is used for no backend. None BackendType = "none" - // Stackdriver is used for Stackdriver backend. - Stackdriver BackendType = "stackdriver" + // Zipkin is used for Zipkin backend. Zipkin BackendType = "zipkin" ) // Config holds the configuration for tracers type Config struct { - Backend BackendType - ZipkinEndpoint string - StackdriverProjectID string + Backend BackendType + ZipkinEndpoint string Debug bool SampleRate float64 @@ -82,7 +78,7 @@ func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) { if backend, ok := cfgMap[backendKey]; ok { switch bt := BackendType(backend); bt { - case Stackdriver, Zipkin, None: + case Zipkin, None: tc.Backend = bt default: return nil, fmt.Errorf("unsupported tracing backend value %q", backend) @@ -100,7 +96,6 @@ func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) { if err := cm.Parse(cfgMap, cm.AsString(zipkinEndpointKey, &tc.ZipkinEndpoint), - cm.AsString(stackdriverProjectIDKey, &tc.StackdriverProjectID), cm.AsBool(debugKey, &tc.Debug), cm.AsFloat64(sampleRateKey, &tc.SampleRate), ); err != nil { @@ -111,14 +106,6 @@ func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) { return nil, errors.New("zipkin tracing enabled without a zipkin endpoint specified") } - if tc.Backend == Stackdriver && tc.StackdriverProjectID == "" { - projectID, err := metadata.ProjectID() - if err != nil { - return nil, fmt.Errorf("stackdriver tracing enabled without a project-id specified: %w", err) - } - tc.StackdriverProjectID = projectID - } - if tc.SampleRate < 0 || tc.SampleRate > 1 { return nil, fmt.Errorf("sample-rate = %v must be in [0, 1] range", tc.SampleRate) } @@ -163,9 +150,6 @@ func TracingConfigToJSON(cfg *Config) (string, error) { //nolint // for backcomp if cfg.ZipkinEndpoint != "" { out[zipkinEndpointKey] = cfg.ZipkinEndpoint } - if cfg.StackdriverProjectID != "" { - out[stackdriverProjectIDKey] = cfg.StackdriverProjectID - } out[debugKey] = fmt.Sprint(cfg.Debug) out[sampleRateKey] = fmt.Sprint(cfg.SampleRate) diff --git a/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go index 9494168c6f..3b3e7a17a1 100644 --- a/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Knative Authors +Copyright 2021 The Knative Authors Licensed under the Apache License, Version 2.0 (the 
"License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/version/version.go b/vendor/knative.dev/pkg/version/version.go index e14cbb1b37..86745aacba 100644 --- a/vendor/knative.dev/pkg/version/version.go +++ b/vendor/knative.dev/pkg/version/version.go @@ -32,9 +32,8 @@ const ( // NOTE: If you are changing this line, please also update the minimum kubernetes // version listed here: - // https://github.com/knative/docs/blob/main/docs/install/any-kubernetes-cluster.md#before-you-begin - // https://github.com/knative/docs/blob/main/docs/install/knative-with-operators.md#prerequisites - defaultMinimumVersion = "v1.18.0" + // https://github.com/knative/docs/blob/mkdocs/docs/snippets/prerequisites.md + defaultMinimumVersion = "v1.19.0" ) func getMinimumVersion() string { diff --git a/vendor/knative.dev/pkg/webhook/OWNERS b/vendor/knative.dev/pkg/webhook/OWNERS index 001df221fe..c1d307e337 100644 --- a/vendor/knative.dev/pkg/webhook/OWNERS +++ b/vendor/knative.dev/pkg/webhook/OWNERS @@ -1,7 +1,9 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- webhook-approvers +- dprotaso +- mattmoor +- tcnghia reviewers: -- webhook-reviewers +- whaught diff --git a/vendor/knative.dev/pkg/webhook/admission.go b/vendor/knative.dev/pkg/webhook/admission.go index 4c799f2464..4d00c746b2 100644 --- a/vendor/knative.dev/pkg/webhook/admission.go +++ b/vendor/knative.dev/pkg/webhook/admission.go @@ -143,6 +143,7 @@ func admissionHandler(rootLogger *zap.SugaredLogger, stats StatsReporter, c Admi } } +// StatelessAdmissionImpl marks a reconciler as stateless. // Inline this type to implement StatelessAdmissionController. type StatelessAdmissionImpl struct{} diff --git a/vendor/knative.dev/pkg/webhook/certificates/resources/certs.go b/vendor/knative.dev/pkg/webhook/certificates/resources/certs.go index c7ab8f6a3d..811eb569c2 100644 --- a/vendor/knative.dev/pkg/webhook/certificates/resources/certs.go +++ b/vendor/knative.dev/pkg/webhook/certificates/resources/certs.go @@ -18,7 +18,8 @@ package resources import ( "context" - "crypto/ed25519" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" "crypto/x509" "crypto/x509/pkix" @@ -62,7 +63,7 @@ func createCertTemplate(name, namespace string, notAfter time.Time) (*x509.Certi Organization: []string{organization}, CommonName: commonName, }, - SignatureAlgorithm: x509.PureEd25519, + SignatureAlgorithm: x509.ECDSAWithSHA256, NotBefore: time.Now(), NotAfter: notAfter, BasicConstraintsValid: true, @@ -112,13 +113,14 @@ func createCert(template, parent *x509.Certificate, pub, parentPriv interface{}) return } -func createCA(ctx context.Context, name, namespace string, notAfter time.Time) (ed25519.PrivateKey, *x509.Certificate, []byte, error) { +func createCA(ctx context.Context, name, namespace string, notAfter time.Time) (*ecdsa.PrivateKey, *x509.Certificate, []byte, error) { logger := logging.FromContext(ctx) - publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { logger.Errorw("error generating random key", zap.Error(err)) return nil, nil, nil, err } + publicKey := privateKey.Public() rootCertTmpl, err := createCACertTemplate(name, namespace, notAfter) if err != nil { @@ -148,11 +150,13 @@ func CreateCerts(ctx context.Context, name, namespace string, notAfter time.Time } // Then create the private key for the serving cert - publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) + privateKey, 
err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { logger.Errorw("error generating random key", zap.Error(err)) return nil, nil, nil, err } + publicKey := privateKey.Public() + servCertTemplate, err := createServerCertTemplate(name, namespace, notAfter) if err != nil { logger.Errorw("failed to create the server certificate template", zap.Error(err)) diff --git a/vendor/knative.dev/pkg/webhook/configmaps/configmaps.go b/vendor/knative.dev/pkg/webhook/configmaps/configmaps.go index c90f3b2e71..4d7ea5bfd3 100644 --- a/vendor/knative.dev/pkg/webhook/configmaps/configmaps.go +++ b/vendor/knative.dev/pkg/webhook/configmaps/configmaps.go @@ -137,9 +137,13 @@ func (ac *reconciler) reconcileValidatingWebhook(ctx context.Context, caCert []b webhook := configuredWebhook.DeepCopy() - // Clear out any previous (bad) OwnerReferences. - // See: https://github.com/knative/serving/issues/5845 - webhook.OwnerReferences = nil + // Set the owner to namespace. + ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to fetch namespace: %w", err) + } + nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace")) + webhook.OwnerReferences = []metav1.OwnerReference{nsRef} for i, wh := range webhook.Webhooks { if wh.Name != webhook.Name { diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go index d57cc41f19..a5bcc4137d 100644 --- a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go @@ -30,6 +30,7 @@ import ( jsonpatch "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -172,9 +173,12 @@ func (ac *reconciler) reconcileMutatingWebhook(ctx context.Context, caCert []byt current := configuredWebhook.DeepCopy() - // Clear out any previous (bad) OwnerReferences. 
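Two related changes land around here: the certificate helpers above switch key generation from Ed25519 to ECDSA P-256 with SHA-256 signatures, and the webhook reconcilers (configmaps above, defaulting here, validation below) now make the system namespace the controller owner of the generated webhook configuration instead of clearing OwnerReferences. A minimal sketch of that ownership pattern; the namespace name is illustrative, the real code resolves it via system.Namespace():

package example

import (
	"context"
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ownWebhookByNamespace mirrors the reconciler pattern: fetch the namespace the
// webhook runs in and set it as the controller owner of the webhook config, so
// the config is garbage-collected together with the namespace.
func ownWebhookByNamespace(ctx context.Context, client kubernetes.Interface, wh *admissionregistrationv1.MutatingWebhookConfiguration) error {
	ns, err := client.CoreV1().Namespaces().Get(ctx, "tekton-pipelines", metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to fetch namespace: %w", err)
	}
	nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace"))
	wh.OwnerReferences = []metav1.OwnerReference{nsRef}
	return nil
}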
- // See: https://github.com/knative/serving/issues/5845 - current.OwnerReferences = nil + ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to fetch namespace: %w", err) + } + nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace")) + current.OwnerReferences = []metav1.OwnerReference{nsRef} for i, wh := range current.Webhooks { if wh.Name != current.Name { diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go index 7d1a7adcf5..0078b61cef 100644 --- a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go @@ -25,6 +25,7 @@ import ( "github.com/gobuffalo/flect" "go.uber.org/zap" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -135,9 +136,13 @@ func (ac *reconciler) reconcileValidatingWebhook(ctx context.Context, caCert []b current := configuredWebhook.DeepCopy() - // Clear out any previous (bad) OwnerReferences. - // See: https://github.com/knative/serving/issues/5845 - current.OwnerReferences = nil + // Set the owner to namespace. + ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to fetch namespace: %w", err) + } + nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace")) + current.OwnerReferences = []metav1.OwnerReference{nsRef} for i, wh := range current.Webhooks { if wh.Name != current.Name { diff --git a/vendor/modules.txt b/vendor/modules.txt index 77c3182a80..0de8d4fc35 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,17 +1,9 @@ # cloud.google.com/go v0.72.0 cloud.google.com/go/compute/metadata -cloud.google.com/go/container/apiv1 -cloud.google.com/go/monitoring/apiv3 -cloud.google.com/go/trace/apiv2 # contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d contrib.go.opencensus.io/exporter/ocagent -# contrib.go.opencensus.io/exporter/prometheus v0.2.1-0.20200609204449-6bcf6f8577f0 +# contrib.go.opencensus.io/exporter/prometheus v0.3.0 contrib.go.opencensus.io/exporter/prometheus -# contrib.go.opencensus.io/exporter/stackdriver v0.13.5 -contrib.go.opencensus.io/exporter/stackdriver -contrib.go.opencensus.io/exporter/stackdriver/monitoredresource -contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws -contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp # github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 @@ -21,44 +13,6 @@ github.com/alecthomas/template github.com/alecthomas/template/parse # github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/alecthomas/units -# github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f -github.com/antlr/antlr4/runtime/Go/antlr -# github.com/aws/aws-sdk-go v1.31.12 -github.com/aws/aws-sdk-go/aws -github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/aws/awsutil -github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/aws/client/metadata -github.com/aws/aws-sdk-go/aws/corehandlers -github.com/aws/aws-sdk-go/aws/credentials 
-github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/aws/credentials/processcreds -github.com/aws/aws-sdk-go/aws/credentials/stscreds -github.com/aws/aws-sdk-go/aws/csm -github.com/aws/aws-sdk-go/aws/defaults -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/aws/signer/v4 -github.com/aws/aws-sdk-go/internal/context -github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/sdkmath -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/internal/sdkuri -github.com/aws/aws-sdk-go/internal/shareddefaults -github.com/aws/aws-sdk-go/internal/strings -github.com/aws/aws-sdk-go/internal/sync/singleflight -github.com/aws/aws-sdk-go/private/protocol -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil -github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/private/protocol/query/queryutil -github.com/aws/aws-sdk-go/private/protocol/rest -github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/service/sts -github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 @@ -81,7 +35,7 @@ github.com/emicklei/go-restful github.com/emicklei/go-restful/log # github.com/evanphx/json-patch v4.9.0+incompatible github.com/evanphx/json-patch -# github.com/evanphx/json-patch/v5 v5.2.0 +# github.com/evanphx/json-patch/v5 v5.5.0 github.com/evanphx/json-patch/v5 # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml @@ -97,18 +51,18 @@ github.com/go-openapi/jsonpointer github.com/go-openapi/jsonreference # github.com/go-openapi/spec v0.20.2 github.com/go-openapi/spec -# github.com/go-openapi/swag v0.19.13 +# github.com/go-openapi/swag v0.19.15 github.com/go-openapi/swag # github.com/gobuffalo/envy v1.6.5 github.com/gobuffalo/envy -# github.com/gobuffalo/flect v0.2.2 +# github.com/gobuffalo/flect v0.2.3 github.com/gobuffalo/flect # github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.4.3 +# github.com/golang/protobuf v1.5.2 github.com/golang/protobuf/descriptor github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto @@ -118,24 +72,7 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/google/cel-go v0.7.3 -github.com/google/cel-go/cel -github.com/google/cel-go/checker -github.com/google/cel-go/checker/decls -github.com/google/cel-go/common -github.com/google/cel-go/common/containers -github.com/google/cel-go/common/debug -github.com/google/cel-go/common/operators -github.com/google/cel-go/common/overloads -github.com/google/cel-go/common/types -github.com/google/cel-go/common/types/pb -github.com/google/cel-go/common/types/ref -github.com/google/cel-go/common/types/traits -github.com/google/cel-go/interpreter -github.com/google/cel-go/interpreter/functions -github.com/google/cel-go/parser -github.com/google/cel-go/parser/gen -# github.com/google/go-cmp v0.5.5 +# github.com/google/go-cmp v0.5.6 ## explicit github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/cmpopts @@ 
-150,8 +87,6 @@ github.com/google/gofuzz github.com/google/gofuzz/bytesource # github.com/google/uuid v1.2.0 github.com/google/uuid -# github.com/googleapis/gax-go/v2 v2.0.5 -github.com/googleapis/gax-go/v2 # github.com/googleapis/gnostic v0.5.3 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions @@ -166,17 +101,15 @@ github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/imdario/mergo v0.3.10 github.com/imdario/mergo -# github.com/jmespath/go-jmespath v0.3.0 -github.com/jmespath/go-jmespath # github.com/joho/godotenv v1.3.0 github.com/joho/godotenv # github.com/josharian/intern v1.0.0 github.com/josharian/intern -# github.com/json-iterator/go v1.1.10 +# github.com/json-iterator/go v1.1.11 github.com/json-iterator/go # github.com/kelseyhightower/envconfig v1.4.0 github.com/kelseyhightower/envconfig -# github.com/mailru/easyjson v0.7.6 +# github.com/mailru/easyjson v0.7.7 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter @@ -207,30 +140,28 @@ github.com/modern-go/reflect2 github.com/openzipkin/zipkin-go/model # github.com/pkg/errors v0.9.1 github.com/pkg/errors -# github.com/prometheus/client_golang v1.9.0 +# github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.19.0 +# github.com/prometheus/common v0.26.0 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/log github.com/prometheus/common/model -# github.com/prometheus/procfs v0.2.0 +# github.com/prometheus/procfs v0.6.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/statsd_exporter v0.15.0 +# github.com/prometheus/statsd_exporter v0.20.0 github.com/prometheus/statsd_exporter/pkg/mapper github.com/prometheus/statsd_exporter/pkg/mapper/fsm # github.com/sirupsen/logrus v1.7.0 github.com/sirupsen/logrus # github.com/spf13/pflag v1.0.5 github.com/spf13/pflag -# github.com/stoewer/go-strcase v1.2.0 -github.com/stoewer/go-strcase # github.com/tektoncd/pipeline v0.24.1 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/pipeline @@ -248,10 +179,12 @@ github.com/tektoncd/pipeline/pkg/substitution ## explicit github.com/tektoncd/plumbing github.com/tektoncd/plumbing/scripts -# github.com/tektoncd/triggers v0.14.1 +# github.com/tektoncd/triggers v0.15.1 ## explicit +github.com/tektoncd/triggers/pkg/apis/triggers github.com/tektoncd/triggers/pkg/apis/triggers/config -github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1 +github.com/tektoncd/triggers/pkg/apis/triggers/contexts +github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1 # go.opencensus.io v0.23.0 go.opencensus.io go.opencensus.io/internal @@ -264,7 +197,6 @@ go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 go.opencensus.io/plugin/ochttp/propagation/tracecontext go.opencensus.io/resource -go.opencensus.io/resource/resourcekeys go.opencensus.io/stats go.opencensus.io/stats/internal go.opencensus.io/stats/view @@ -273,15 +205,15 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.uber.org/atomic v1.7.0 +# 
go.uber.org/atomic v1.8.0 go.uber.org/atomic # go.uber.org/automaxprocs v1.4.0 go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# go.uber.org/multierr v1.5.0 +# go.uber.org/multierr v1.6.0 go.uber.org/multierr -# go.uber.org/zap v1.16.0 +# go.uber.org/zap v1.18.1 ## explicit go.uber.org/zap go.uber.org/zap/buffer @@ -291,13 +223,13 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/ztest go.uber.org/zap/zapcore go.uber.org/zap/zaptest -# golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad +# golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/crypto/ssh/terminal -# golang.org/x/mod v0.4.1 +# golang.org/x/mod v0.4.2 ## explicit golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20210119194325-5f4716e94777 +# golang.org/x/net v0.0.0-20210614182718-04defd469f4e golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts @@ -307,17 +239,18 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013 +# golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 golang.org/x/oauth2 +golang.org/x/oauth2/authhandler golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c +# golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 @@ -327,15 +260,15 @@ golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc/eventlog # golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf golang.org/x/term -# golang.org/x/text v0.3.5 +# golang.org/x/text v0.3.6 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 +# golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba golang.org/x/time/rate -# golang.org/x/tools v0.1.0 +# golang.org/x/tools v0.1.4 golang.org/x/tools/go/ast/astutil golang.org/x/tools/imports golang.org/x/tools/internal/event @@ -346,22 +279,15 @@ golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports +golang.org/x/tools/internal/typeparams # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# gomodules.xyz/jsonpatch/v2 v2.1.0 +# gomodules.xyz/jsonpatch/v2 v2.2.0 ## explicit gomodules.xyz/jsonpatch/v2 # google.golang.org/api v0.36.0 -google.golang.org/api/internal -google.golang.org/api/internal/impersonate -google.golang.org/api/iterator -google.golang.org/api/option -google.golang.org/api/option/internaloption google.golang.org/api/support/bundler -google.golang.org/api/transport/cert -google.golang.org/api/transport/grpc -google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.7 google.golang.org/appengine google.golang.org/appengine/internal @@ -371,48 +297,24 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules 
google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d -google.golang.org/genproto/googleapis/api -google.golang.org/genproto/googleapis/api/annotations -google.golang.org/genproto/googleapis/api/distribution -google.golang.org/genproto/googleapis/api/expr/v1alpha1 +# google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de google.golang.org/genproto/googleapis/api/httpbody -google.golang.org/genproto/googleapis/api/label -google.golang.org/genproto/googleapis/api/metric -google.golang.org/genproto/googleapis/api/monitoredres -google.golang.org/genproto/googleapis/container/v1 -google.golang.org/genproto/googleapis/devtools/cloudtrace/v2 -google.golang.org/genproto/googleapis/monitoring/v3 google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.36.0 +# google.golang.org/grpc v1.38.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb -google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/alts -google.golang.org/grpc/credentials/alts/internal -google.golang.org/grpc/credentials/alts/internal/authinfo -google.golang.org/grpc/credentials/alts/internal/conn -google.golang.org/grpc/credentials/alts/internal/handshaker -google.golang.org/grpc/credentials/alts/internal/handshaker/service -google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp -google.golang.org/grpc/credentials/google -google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog @@ -446,7 +348,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.25.0 +# google.golang.org/protobuf v1.26.0 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -459,13 +361,12 @@ google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text google.golang.org/protobuf/internal/errors -google.golang.org/protobuf/internal/fieldsort google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/filetype google.golang.org/protobuf/internal/flags google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl -google.golang.org/protobuf/internal/mapsort +google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs @@ -477,18 +378,13 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb -google.golang.org/protobuf/types/dynamicpb 
google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb -google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/fieldmaskpb -google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 -# gopkg.in/evanphx/json-patch.v4 v4.9.0 -gopkg.in/evanphx/json-patch.v4 # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 # gopkg.in/yaml.v2 v2.4.0 @@ -513,11 +409,12 @@ gotest.tools/v3/internal/cleanup gotest.tools/v3/internal/difflib gotest.tools/v3/internal/format gotest.tools/v3/internal/source -# k8s.io/api v0.19.7 +# k8s.io/api v0.20.7 ## explicit k8s.io/api/admission/v1 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apiserverinternal/v1alpha1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 @@ -542,8 +439,10 @@ k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/flowcontrol/v1alpha1 +k8s.io/api/flowcontrol/v1beta1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1 k8s.io/api/node/v1alpha1 k8s.io/api/node/v1beta1 k8s.io/api/policy/v1beta1 @@ -553,11 +452,10 @@ k8s.io/api/rbac/v1beta1 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 -k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.19.7 +# k8s.io/apiextensions-apiserver v0.20.7 ## explicit k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -566,7 +464,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.19.7 +# k8s.io/apimachinery v0.20.7 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -617,7 +515,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.19.7 +# k8s.io/client-go v0.20.7 ## explicit k8s.io/client-go/discovery k8s.io/client-go/discovery/fake @@ -626,6 +524,8 @@ k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 k8s.io/client-go/informers/admissionregistration/v1beta1 +k8s.io/client-go/informers/apiserverinternal +k8s.io/client-go/informers/apiserverinternal/v1alpha1 k8s.io/client-go/informers/apps k8s.io/client-go/informers/apps/v1 k8s.io/client-go/informers/apps/v1beta1 @@ -656,11 +556,13 @@ k8s.io/client-go/informers/extensions k8s.io/client-go/informers/extensions/v1beta1 k8s.io/client-go/informers/flowcontrol k8s.io/client-go/informers/flowcontrol/v1alpha1 +k8s.io/client-go/informers/flowcontrol/v1beta1 k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1 k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/informers/node +k8s.io/client-go/informers/node/v1 k8s.io/client-go/informers/node/v1alpha1 k8s.io/client-go/informers/node/v1beta1 k8s.io/client-go/informers/policy @@ -673,8 +575,6 @@ 
k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 k8s.io/client-go/informers/scheduling/v1beta1 -k8s.io/client-go/informers/settings -k8s.io/client-go/informers/settings/v1alpha1 k8s.io/client-go/informers/storage k8s.io/client-go/informers/storage/v1 k8s.io/client-go/informers/storage/v1alpha1 @@ -686,6 +586,8 @@ k8s.io/client-go/kubernetes/typed/admissionregistration/v1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake +k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 +k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -734,10 +636,14 @@ k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake +k8s.io/client-go/kubernetes/typed/node/v1 +k8s.io/client-go/kubernetes/typed/node/v1/fake k8s.io/client-go/kubernetes/typed/node/v1alpha1 k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake k8s.io/client-go/kubernetes/typed/node/v1beta1 @@ -756,8 +662,6 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake -k8s.io/client-go/kubernetes/typed/settings/v1alpha1 -k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1/fake k8s.io/client-go/kubernetes/typed/storage/v1alpha1 @@ -766,6 +670,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1beta1 +k8s.io/client-go/listers/apiserverinternal/v1alpha1 k8s.io/client-go/listers/apps/v1 k8s.io/client-go/listers/apps/v1beta1 k8s.io/client-go/listers/apps/v1beta2 @@ -786,8 +691,10 @@ k8s.io/client-go/listers/events/v1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 k8s.io/client-go/listers/flowcontrol/v1alpha1 +k8s.io/client-go/listers/flowcontrol/v1beta1 k8s.io/client-go/listers/networking/v1 k8s.io/client-go/listers/networking/v1beta1 +k8s.io/client-go/listers/node/v1 k8s.io/client-go/listers/node/v1alpha1 k8s.io/client-go/listers/node/v1beta1 k8s.io/client-go/listers/policy/v1beta1 @@ -797,7 +704,6 @@ k8s.io/client-go/listers/rbac/v1beta1 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 -k8s.io/client-go/listers/settings/v1alpha1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 @@ -836,7 +742,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry 
k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.19.7 +# k8s.io/code-generator v0.20.7 ## explicit k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/client-gen/args @@ -858,7 +764,7 @@ k8s.io/code-generator/cmd/lister-gen/args k8s.io/code-generator/cmd/lister-gen/generators k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util -# k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 +# k8s.io/gengo v0.0.0-20210203185629-de9496dff47b k8s.io/gengo/args k8s.io/gengo/examples/deepcopy-gen/generators k8s.io/gengo/examples/defaulter-gen/generators @@ -879,13 +785,12 @@ k8s.io/utils/buffer k8s.io/utils/integer k8s.io/utils/pointer k8s.io/utils/trace -# knative.dev/pkg v0.0.0-20210331065221-952fdd90dbb0 +# knative.dev/pkg v0.0.0-20210730172132-bb4aaf09c430 ## explicit knative.dev/pkg/apis knative.dev/pkg/apis/duck knative.dev/pkg/apis/duck/ducktypes knative.dev/pkg/apis/duck/v1 -knative.dev/pkg/apis/duck/v1alpha1 knative.dev/pkg/apis/duck/v1beta1 knative.dev/pkg/apis/testing knative.dev/pkg/changeset @@ -901,6 +806,7 @@ knative.dev/pkg/codegen/cmd/injection-gen/generators knative.dev/pkg/configmap knative.dev/pkg/configmap/informer knative.dev/pkg/controller +knative.dev/pkg/environment knative.dev/pkg/hash knative.dev/pkg/injection knative.dev/pkg/injection/clients/namespacedkube/informers/core/v1/secret @@ -924,7 +830,7 @@ knative.dev/pkg/signals knative.dev/pkg/system knative.dev/pkg/system/testing knative.dev/pkg/test -knative.dev/pkg/test/flags +knative.dev/pkg/test/environment knative.dev/pkg/test/ingress knative.dev/pkg/test/logging knative.dev/pkg/test/monitoring @@ -948,7 +854,7 @@ sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/fake sigs.k8s.io/controller-runtime/pkg/internal/objectutil -# sigs.k8s.io/structured-merge-diff/v4 v4.0.2 +# sigs.k8s.io/structured-merge-diff/v4 v4.0.3 sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index 49e6dd1690..a5a467c0f0 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -70,11 +70,11 @@ func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) } -// GetUsing returns the field identified by this FieldCacheEntry from the provided struct. +// GetFrom returns the field identified by this FieldCacheEntry from the provided struct. func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { // field might be nested within 'inline' structs for _, elem := range f.fieldPath { - structVal = structVal.FieldByIndex(elem) + structVal = dereference(structVal).FieldByIndex(elem) } return structVal } @@ -150,7 +150,11 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi continue } if isInline { - buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) + e := field.Type + if field.Type.Kind() == reflect.Ptr { + e = field.Type.Elem() + } + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
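The reflectcache change above makes inline struct fields work when they are declared as pointers: buildStructCacheEntry descends into the pointed-to element type, and GetFrom dereferences each step of the field path before calling FieldByIndex. The dereference helper itself is defined elsewhere in that file and is not part of this hunk; a plausible shape for it, stated only as an assumption, would be:

package example

import "reflect"

// dereference follows a non-nil pointer so FieldByIndex can be applied to the
// struct value it points at; any other value is returned unchanged. This is an
// assumed sketch, not the vendored implementation.
func dereference(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Ptr && !v.IsNil() {
		return v.Elem()
	}
	return v
}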